Project import generated by Copybara.

GitOrigin-RevId: 5633bcff0c6162b9e4b5f1264264611e950c8ec7
Default email 2024-10-09 18:51:18 +02:00
parent adc5d0fb05
commit 555cd8a8f9
654 changed files with 27671 additions and 17314 deletions


@ -160,6 +160,9 @@ ad815aebfbfe1415ff6436521d545029c803c3fb
# nixos/nvidia: apply nixfmt-rfc-style (#313440)
fbdcdde04a7caa007e825a8b822c75fab9adb2d6
# treewide: reformat files which need reformatting after (#341407)
e0464e47880a69896f0fb1810f00e0de469f770a
# step-cli: format package.nix with nixfmt (#331629)
fc7a83f8b62e90de5679e993d4d49ca014ea013d


@ -15,6 +15,8 @@
/.github/workflows @NixOS/Security @Mic92 @zowoq
/.github/workflows/check-nix-format.yml @infinisil
/.github/workflows/nixpkgs-vet.yml @infinisil @philiptaron
/.github/workflows/codeowners.yml @infinisil
/.github/OWNERS @infinisil
/ci @infinisil @philiptaron @NixOS/Security
# Development support
@ -28,7 +30,7 @@
/lib/cli.nix @infinisil @Profpatsch
/lib/debug.nix @infinisil @Profpatsch
/lib/asserts.nix @infinisil @Profpatsch
/lib/path.* @infinisil
/lib/path/* @infinisil
/lib/fileset @infinisil
## Libraries / Module system
/lib/modules.nix @infinisil @roberth
@ -105,7 +107,7 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @NixOS/nix-team @raitobeza
/nixos/lib/test-driver @tfc
# NixOS QEMU virtualisation
/nixos/virtualisation/qemu-vm.nix @raitobezarius
/nixos/modules/virtualisation/qemu-vm.nix @raitobezarius
# ACME
/nixos/modules/security/acme @arianvp @flokli @aanderse @emilazy # no merge permission: @m1cr0man
@ -170,7 +172,7 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @NixOS/nix-team @raitobeza
# Audio
/nixos/modules/services/audio/botamusique.nix @mweinelt
/nixos/modules/services/audio/snapserver.nix @mweinelt
/nixos/tests/modules/services/audio/botamusique.nix @mweinelt
/nixos/tests/botamusique.nix @mweinelt
/nixos/tests/snapcast.nix @mweinelt
# Browsers
@ -204,21 +206,20 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
# PostgreSQL and related stuff
/pkgs/servers/sql/postgresql @thoughtpolice
/nixos/modules/services/databases/postgresql.xml @thoughtpolice
/nixos/modules/services/databases/postgresql.md @thoughtpolice
/nixos/modules/services/databases/postgresql.nix @thoughtpolice
/nixos/tests/postgresql.nix @thoughtpolice
# Hardened profile & related modules
/nixos/modules/profiles/hardened.nix @joachifm
/nixos/modules/security/hidepid.nix @joachifm
/nixos/modules/security/lock-kernel-modules.nix @joachifm
/nixos/modules/security/misc.nix @joachifm
/nixos/tests/hardened.nix @joachifm
/pkgs/os-specific/linux/kernel/hardened-config.nix @joachifm
/pkgs/os-specific/linux/kernel/hardened/config.nix @joachifm
# Home Automation
/nixos/modules/services/misc/home-assistant.nix @mweinelt
/nixos/modules/services/misc/zigbee2mqtt.nix @mweinelt
/nixos/modules/services/home-automation/home-assistant.nix @mweinelt
/nixos/modules/services/home-automation/zigbee2mqtt.nix @mweinelt
/nixos/tests/home-assistant.nix @mweinelt
/nixos/tests/zigbee2mqtt.nix @mweinelt
/pkgs/servers/home-assistant @mweinelt
@ -316,8 +317,6 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
# nim
/pkgs/development/compilers/nim @ehmry
/pkgs/development/nim-packages @ehmry
/pkgs/top-level/nim-packages.nix @ehmry
# terraform providers
/pkgs/applications/networking/cluster/terraform-providers @zowoq

third_party/nixpkgs/.github/OWNERS (vendored, new file, 19 lines)

@ -0,0 +1,19 @@
#
# Currently unused! Use CODEOWNERS for now, see workflows/codeowners.yml
#
####################
#
# This file is used to describe who owns what in this repository.
# Users/teams will get review requests for PRs that change their files.
#
# This file does not replace `meta.maintainers`
# but is instead used for other things than derivations and modules,
# like documentation, package sets, and other assets.
#
# This file uses the same syntax as the natively supported CODEOWNERS file,
# see https://help.github.com/articles/about-codeowners/ for documentation.
# However, it comes with some notable differences:
# - There is no need for a user/team listed here to have write access.
# - No reviews will be requested for PRs that target the wrong base branch.
#
# Processing of this file is implemented in workflows/codeowners.yml


@ -385,9 +385,11 @@
- changed-files:
- any-glob-to-any-file:
- nixos/modules/virtualisation/xen*
- pkgs/applications/virtualization/xen/**
- pkgs/by-name/xe/xen/*
- pkgs/by-name/qe/qemu_xen/*
- pkgs/by-name/xe/xen-guest-agent/*
- pkgs/by-name/xt/xtf/*
- pkgs/build-support/xen/*
- pkgs/development/ocaml-modules/xen*/*
- pkgs/development/ocaml-modules/vchan/*


@ -20,7 +20,7 @@ jobs:
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
- uses: cachix/cachix-action@ad2ddac53f961de1989924296a1f236fcfbaa4fc # v15
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.


@ -21,7 +21,7 @@ jobs:
sparse-checkout: |
lib
maintainers
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -38,7 +38,7 @@ jobs:
# This should not be a URL, because it would allow PRs to run arbitrary code in CI!
rev=$(jq -r .rev ci/pinned-nixpkgs.json)
echo "url=https://github.com/NixOS/nixpkgs/archive/$rev.tar.gz" >> "$GITHUB_ENV"
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -32,7 +32,7 @@ jobs:
# This should not be a URL, because it would allow PRs to run arbitrary code in CI!
rev=$(jq -r .rev ci/pinned-nixpkgs.json)
echo "url=https://github.com/NixOS/nixpkgs/archive/$rev.tar.gz" >> "$GITHUB_ENV"
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -14,7 +14,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
- name: Build shell
run: nix-build shell.nix
@ -26,6 +26,6 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
- name: Build shell
run: nix-build shell.nix


@ -0,0 +1,88 @@
name: Codeowners
# This workflow depends on a GitHub App with the following permissions:
# - Repository > Administration: read-only
# - Organization > Members: read-only
# - Repository > Pull Requests: read-write
# The App needs to be installed on this repository,
# the OWNER_APP_ID repository variable needs to be set,
# and the OWNER_APP_PRIVATE_KEY repository secret needs to be set.
on:
pull_request_target:
types: [opened, ready_for_review, synchronize, reopened, edited]
env:
# TODO: Once confirmed that this works by seeing that the action would request
# reviews from the same people (or refuse for wrong base branches),
# move all entries from CODEOWNERS to OWNERS and change this value here
# OWNERS_FILE: .github/OWNERS
OWNERS_FILE: .github/CODEOWNERS
# Also remove this
DRY_MODE: 1
jobs:
# Check that the code owners file is valid
check:
name: Check
runs-on: ubuntu-latest
steps:
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
# Important: Because we use pull_request_target, this checks out the base branch of the PR, not the PR itself.
# We later build and run code from the base branch with access to secrets,
# so it's important this is not the PR's code.
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
path: base
- name: Build codeowners validator
run: nix-build base/ci -A codeownersValidator
- uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
id: app-token
with:
app-id: ${{ vars.OWNER_APP_ID }}
private-key: ${{ secrets.OWNER_APP_PRIVATE_KEY }}
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
ref: refs/pull/${{ github.event.number }}/merge
path: pr
- name: Validate codeowners
run: result/bin/codeowners-validator
env:
OWNERS_FILE: pr/${{ env.OWNERS_FILE }}
GITHUB_ACCESS_TOKEN: ${{ steps.app-token.outputs.token }}
REPOSITORY_PATH: pr
OWNER_CHECKER_REPOSITORY: ${{ github.repository }}
# Set this to "notowned,avoid-shadowing" to check that all files are owned by somebody
EXPERIMENTAL_CHECKS: "avoid-shadowing"
# Request reviews from code owners
request:
name: Request
runs-on: ubuntu-latest
steps:
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
# Important: Because we use pull_request_target, this checks out the base branch of the PR, not the PR head.
# This is intentional, because we need to request the review of owners as declared in the base branch.
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
id: app-token
with:
app-id: ${{ vars.OWNER_APP_ID }}
private-key: ${{ secrets.OWNER_APP_PRIVATE_KEY }}
- name: Build review request package
run: nix-build ci -A requestReviews
- name: Request reviews
run: result/bin/request-reviews.sh ${{ github.repository }} ${{ github.event.number }} "$OWNERS_FILE"
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
# Don't do anything on draft PRs
DRY_MODE: ${{ github.event.pull_request.draft && '1' || '' }}


@ -29,7 +29,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
# nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0


@ -19,7 +19,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -21,7 +21,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -30,7 +30,7 @@ jobs:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
if: ${{ env.CHANGED_FILES && env.CHANGED_FILES != '' }}
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: Parse all changed or added nix files


@ -85,7 +85,7 @@ jobs:
base=$(mktemp -d)
git worktree add "$base" "$(git rev-parse HEAD^1)"
echo "base=$base" >> "$GITHUB_ENV"
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
if: env.mergedSha
- name: Fetching the pinned tool
if: env.mergedSha


@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: cachix/install-nix-action@9f70348d77d0422624097c4b7a75563948901306 # v29
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup


@ -637,7 +637,7 @@ Names of files and directories should be in lowercase, with dashes between words
```nix
{
buildInputs = lib.optional stdenv.isDarwin iconv;
buildInputs = lib.optional stdenv.hostPlatform.isDarwin iconv;
}
```
@ -645,7 +645,7 @@ Names of files and directories should be in lowercase, with dashes between words
```nix
{
buildInputs = if stdenv.isDarwin then [ iconv ] else null;
buildInputs = if stdenv.hostPlatform.isDarwin then [ iconv ] else null;
}
```


@ -0,0 +1,31 @@
{
buildGoModule,
fetchFromGitHub,
fetchpatch,
}:
buildGoModule {
name = "codeowners-validator";
src = fetchFromGitHub {
owner = "mszostok";
repo = "codeowners-validator";
rev = "f3651e3810802a37bd965e6a9a7210728179d076";
hash = "sha256-5aSmmRTsOuPcVLWfDF6EBz+6+/Qpbj66udAmi1CLmWQ=";
};
patches = [
# https://github.com/mszostok/codeowners-validator/pull/222
(fetchpatch {
name = "user-write-access-check";
url = "https://github.com/mszostok/codeowners-validator/compare/f3651e3810802a37bd965e6a9a7210728179d076...840eeb88b4da92bda3e13c838f67f6540b9e8529.patch";
hash = "sha256-t3Dtt8SP9nbO3gBrM0nRE7+G6N/ZIaczDyVHYAG/6mU=";
})
# Undoes part of the above PR: We don't want to require write access
# to the repository; that's only needed for GitHub's native CODEOWNERS.
# Furthermore, it removes an unnecessary check from the code
# that breaks tokens generated for GitHub Apps.
./permissions.patch
# Allows setting a custom CODEOWNERS path using the OWNERS_FILE env var
./owners-file-name.patch
];
postPatch = "rm -r docs/investigation";
vendorHash = "sha256-R+pW3xcfpkTRqfS2ETVOwG8PZr0iH5ewroiF7u8hcYI=";
}


@ -0,0 +1,15 @@
diff --git a/pkg/codeowners/owners.go b/pkg/codeowners/owners.go
index 6910bd2..e0c95e9 100644
--- a/pkg/codeowners/owners.go
+++ b/pkg/codeowners/owners.go
@@ -39,6 +39,10 @@ func NewFromPath(repoPath string) ([]Entry, error) {
// openCodeownersFile finds a CODEOWNERS file and returns content.
// see: https://help.github.com/articles/about-code-owners/#codeowners-file-location
func openCodeownersFile(dir string) (io.Reader, error) {
+ if file, ok := os.LookupEnv("OWNERS_FILE"); ok {
+ return fs.Open(file)
+ }
+
var detectedFiles []string
for _, p := range []string{".", "docs", ".github"} {
pth := path.Join(dir, p)


@ -0,0 +1,36 @@
diff --git a/internal/check/valid_owner.go b/internal/check/valid_owner.go
index a264bcc..610eda8 100644
--- a/internal/check/valid_owner.go
+++ b/internal/check/valid_owner.go
@@ -16,7 +16,6 @@ import (
const scopeHeader = "X-OAuth-Scopes"
var reqScopes = map[github.Scope]struct{}{
- github.ScopeReadOrg: {},
}
type ValidOwnerConfig struct {
@@ -223,10 +222,7 @@ func (v *ValidOwner) validateTeam(ctx context.Context, name string) *validateErr
for _, t := range v.repoTeams {
// GitHub normalizes name before comparison
if strings.EqualFold(t.GetSlug(), team) {
- if t.Permissions["push"] {
- return nil
- }
- return newValidateError("Team %q cannot review PRs on %q as neither it nor any parent team has write permissions.", team, v.orgRepoName)
+ return nil
}
}
@@ -245,10 +241,7 @@ func (v *ValidOwner) validateGitHubUser(ctx context.Context, name string) *valid
for _, u := range v.repoUsers {
// GitHub normalizes name before comparison
if strings.EqualFold(u.GetLogin(), userName) {
- if u.Permissions["push"] {
- return nil
- }
- return newValidateError("User %q cannot review PRs on %q as they don't have write permissions.", userName, v.orgRepoName)
+ return nil
}
}

third_party/nixpkgs/ci/default.nix (vendored, new file, 29 lines)

@ -0,0 +1,29 @@
let
pinnedNixpkgs = builtins.fromJSON (builtins.readFile ./pinned-nixpkgs.json);
in
{
system ? builtins.currentSystem,
nixpkgs ? null,
}:
let
nixpkgs' =
if nixpkgs == null then
fetchTarball {
url = "https://github.com/NixOS/nixpkgs/archive/${pinnedNixpkgs.rev}.tar.gz";
sha256 = pinnedNixpkgs.sha256;
}
else
nixpkgs;
pkgs = import nixpkgs' {
inherit system;
config = { };
overlays = [ ];
};
in
{
inherit pkgs;
requestReviews = pkgs.callPackage ./request-reviews { };
codeownersValidator = pkgs.callPackage ./codeowners-validator { };
}
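
The workflow jobs above consume this entry point via `nix-build base/ci -A codeownersValidator` and `nix-build ci -A requestReviews`. As a rough sketch (assuming it is evaluated from the repository root), the equivalent Nix expression would be:

```nix
let
  # Import the CI entry point with its defaults; the pinned nixpkgs from
  # ci/pinned-nixpkgs.json is used unless a nixpkgs path is passed in.
  ci = import ./ci { };
in
# Pick one of the exposed attributes, matching `nix-build ci -A requestReviews`.
ci.requestReviews
```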


@ -0,0 +1,43 @@
{
lib,
stdenvNoCC,
makeWrapper,
coreutils,
codeowners,
jq,
curl,
github-cli,
gitMinimal,
}:
stdenvNoCC.mkDerivation {
name = "request-reviews";
src = lib.fileset.toSource {
root = ./.;
fileset = lib.fileset.unions [
./get-reviewers.sh
./request-reviews.sh
./verify-base-branch.sh
./dev-branches.txt
];
};
nativeBuildInputs = [ makeWrapper ];
dontBuild = true;
installPhase = ''
mkdir -p $out/bin
mv dev-branches.txt $out/bin
for bin in *.sh; do
mv "$bin" "$out/bin"
wrapProgram "$out/bin/$bin" \
--set PATH ${
lib.makeBinPath [
coreutils
codeowners
jq
curl
github-cli
gitMinimal
]
}
done
'';
}


@ -0,0 +1,7 @@
# Trusted development branches:
# These generally require PRs to update and are built by Hydra.
master
staging
release-*
staging-*
haskell-updates


@ -0,0 +1,87 @@
#!/usr/bin/env bash
# Get the code owners of the files changed by a PR,
# suitable to be consumed by the API endpoint to request reviews:
# https://docs.github.com/en/rest/pulls/review-requests?apiVersion=2022-11-28#request-reviewers-for-a-pull-request
set -euo pipefail
log() {
echo "$@" >&2
}
if (( "$#" < 5 )); then
log "Usage: $0 GIT_REPO BASE_REF HEAD_REF OWNERS_FILE PR_AUTHOR"
exit 1
fi
gitRepo=$1
baseRef=$2
headRef=$3
ownersFile=$4
prAuthor=$5
tmp=$(mktemp -d)
trap 'rm -rf "$tmp"' exit
git -C "$gitRepo" diff --name-only --merge-base "$baseRef" "$headRef" > "$tmp/touched-files"
readarray -t touchedFiles < "$tmp/touched-files"
log "This PR touches ${#touchedFiles[@]} files"
# Get the owners file from the base branch, because we don't want to allow PRs
# to remove code owners just to avoid pinging them
git -C "$gitRepo" show "$baseRef":"$ownersFile" > "$tmp"/codeowners
# Associative arrays with the team/user as the key for easy deduplication
declare -A teams users
for file in "${touchedFiles[@]}"; do
result=$(codeowners --file "$tmp"/codeowners "$file")
read -r file owners <<< "$result"
if [[ "$owners" == "(unowned)" ]]; then
log "File $file is unowned"
continue
fi
log "File $file is owned by $owners"
# Split up multiple owners, separated by arbitrary amounts of spaces
IFS=" " read -r -a entries <<< "$owners"
for entry in "${entries[@]}"; do
# GitHub technically also supports Emails as code owners,
# but we can't easily support that, so let's not
if [[ ! "$entry" =~ @(.*) ]]; then
log -e "\e[33mCodeowner \"$entry\" for file $file is not valid: Must start with \"@\"\e[0m"
# Don't fail, because the PR for which this script runs can't fix it,
# it has to be fixed in the base branch
continue
fi
# The first regex match is everything after the @
entry=${BASH_REMATCH[1]}
if [[ "$entry" =~ .*/(.*) ]]; then
# Teams look like $org/$team, where we only need $team for the API
# call to request reviews from teams
teams[${BASH_REMATCH[1]}]=
else
# Everything else is a user
users[$entry]=
fi
done
done
# Cannot request a review from the author
if [[ -v users[$prAuthor] ]]; then
log "One or more files are owned by the PR author, ignoring"
unset 'users[$prAuthor]'
fi
# Turn it into a JSON for the GitHub API call to request PR reviewers
jq -n \
--arg users "${!users[*]}" \
--arg teams "${!teams[*]}" \
'{
reviewers: $users | split(" "),
team_reviewers: $teams | split(" ")
}'


@ -0,0 +1,97 @@
#!/usr/bin/env bash
# Requests reviews for a PR after verifying that the base branch is correct
set -euo pipefail
tmp=$(mktemp -d)
trap 'rm -rf "$tmp"' exit
SCRIPT_DIR=$(dirname "$0")
log() {
echo "$@" >&2
}
effect() {
if [[ -n "${DRY_MODE:-}" ]]; then
log "Skipping in dry mode:" "${@@Q}"
else
"$@"
fi
}
if (( $# < 3 )); then
log "Usage: $0 GITHUB_REPO PR_NUMBER OWNERS_FILE"
exit 1
fi
baseRepo=$1
prNumber=$2
ownersFile=$3
log "Fetching PR info"
prInfo=$(gh api \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
"/repos/$baseRepo/pulls/$prNumber")
baseBranch=$(jq -r .base.ref <<< "$prInfo")
log "Base branch: $baseBranch"
prRepo=$(jq -r .head.repo.full_name <<< "$prInfo")
log "PR repo: $prRepo"
prBranch=$(jq -r .head.ref <<< "$prInfo")
log "PR branch: $prBranch"
prAuthor=$(jq -r .user.login <<< "$prInfo")
log "PR author: $prAuthor"
extraArgs=()
if pwdRepo=$(git rev-parse --show-toplevel 2>/dev/null); then
# Speedup for local runs
extraArgs+=(--reference-if-able "$pwdRepo")
fi
log "Fetching Nixpkgs commit history"
# We only need the commit history, not the contents, so we can do a tree-less clone using tree:0
# https://github.blog/open-source/git/get-up-to-speed-with-partial-clone-and-shallow-clone/#user-content-quick-summary
git clone --bare --filter=tree:0 --no-tags --origin upstream "${extraArgs[@]}" https://github.com/"$baseRepo".git "$tmp"/nixpkgs.git
log "Fetching the PR commit history"
# Fetch the PR
git -C "$tmp/nixpkgs.git" remote add fork https://github.com/"$prRepo".git
# This remote config is the same as --filter=tree:0 when cloning
git -C "$tmp/nixpkgs.git" config remote.fork.partialclonefilter tree:0
git -C "$tmp/nixpkgs.git" config remote.fork.promisor true
# This should not conflict with any refs in Nixpkgs
headRef=refs/remotes/fork/pr
# Only fetch into a remote ref, because the local ref namespace is used by Nixpkgs and we don't want any conflicts
git -C "$tmp/nixpkgs.git" fetch --no-tags fork "$prBranch":"$headRef"
log "Checking correctness of the base branch"
if ! "$SCRIPT_DIR"/verify-base-branch.sh "$tmp/nixpkgs.git" "$headRef" "$baseRepo" "$baseBranch" "$prRepo" "$prBranch" | tee "$tmp/invalid-base-error" >&2; then
log "Posting error as comment"
if ! response=$(effect gh api \
--method POST \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
"/repos/$baseRepo/issues/$prNumber/comments" \
-F "body=@$tmp/invalid-base-error"); then
log "Failed to post the comment: $response"
fi
exit 1
fi
log "Getting code owners to request reviews from"
"$SCRIPT_DIR"/get-reviewers.sh "$tmp/nixpkgs.git" "$baseBranch" "$headRef" "$ownersFile" "$prAuthor" > "$tmp/reviewers.json"
log "Requesting reviews from: $(<"$tmp/reviewers.json")"
if ! response=$(effect gh api \
--method POST \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
"/repos/$baseRepo/pulls/$prNumber/requested_reviewers" \
--input "$tmp/reviewers.json"); then
log "Failed to request reviews: $response"
exit 1
fi
log "Successfully requested reviews"


@ -0,0 +1,103 @@
#!/usr/bin/env bash
# Check that a PR doesn't include commits from other development branches.
# If it does, it fails and prints the next steps
set -euo pipefail
tmp=$(mktemp -d)
trap 'rm -rf "$tmp"' exit
SCRIPT_DIR=$(dirname "$0")
log() {
echo "$@" >&2
}
# Small helper to check whether an element is in a list
# Usage: `elementIn foo "${list[@]}"`
elementIn() {
local e match=$1
shift
for e; do
if [[ "$e" == "$match" ]]; then
return 0
fi
done
return 1
}
if (( $# < 6 )); then
log "Usage: $0 LOCAL_REPO HEAD_REF BASE_REPO BASE_BRANCH PR_REPO PR_BRANCH"
exit 1
fi
localRepo=$1
headRef=$2
baseRepo=$3
baseBranch=$4
prRepo=$5
prBranch=$6
# All development branches
devBranchPatterns=()
while read -r pattern; do
if [[ "$pattern" != '#'* ]]; then
devBranchPatterns+=("$pattern")
fi
done < "$SCRIPT_DIR/dev-branches.txt"
git -C "$localRepo" branch --list --format "%(refname:short)" "${devBranchPatterns[@]}" > "$tmp/dev-branches"
readarray -t devBranches < "$tmp/dev-branches"
if [[ "$baseRepo" == "$prRepo" ]] && elementIn "$prBranch" "${devBranches[@]}"; then
log "This PR merges $prBranch into $baseBranch, no commit check necessary"
exit 0
fi
# The current merge base of the PR
prMergeBase=$(git -C "$localRepo" merge-base "$baseBranch" "$headRef")
log "The PR's merge base with the base branch $baseBranch is $prMergeBase"
# This is purely for debugging
git -C "$localRepo" rev-list --reverse "$baseBranch".."$headRef" > "$tmp/pr-commits"
log "The PR includes these $(wc -l < "$tmp/pr-commits") commits:"
cat <"$tmp/pr-commits" >&2
for testBranch in "${devBranches[@]}"; do
if [[ -z "$(git -C "$localRepo" rev-list -1 --since="1 month ago" "$testBranch")" ]]; then
log "Not checking $testBranch, was inactive for the last month"
continue
fi
log "Checking if commits from $testBranch are included in the PR"
# We need to check for any commits that are in the PR which are also in the test branch.
# We could check each commit from the PR individually, but that's unnecessarily slow.
#
# This does _almost_ what we want: `git rev-list --count headRef testBranch ^baseBranch`,
# except that it includes commits that are reachable from _either_ headRef or testBranch,
# instead of restricting it to ones reachable by both
# Easily fixable though, because we can use `git merge-base testBranch headRef`
# to get the least common ancestor (aka merge base) commit reachable by both.
# If the branch being tested is indeed the right base branch,
# this is then also the commit from that branch that the PR is based on top of.
testMergeBase=$(git -C "$localRepo" merge-base "$testBranch" "$headRef")
# And then use the `git rev-list --count`, but replacing the non-working
# `headRef testBranch` with the merge base of the two.
extraCommits=$(git -C "$localRepo" rev-list --count "$testMergeBase" ^"$baseBranch")
if (( extraCommits != 0 )); then
log -e "\e[33m"
echo "The PR's base branch is set to $baseBranch, but $extraCommits commits from the $testBranch branch are included. Make sure you know the [right base branch for your changes](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#branch-conventions), then:"
echo "- If the changes should go to the $testBranch branch, [change the base branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-base-branch-of-a-pull-request) to $testBranch"
echo "- If the changes should go to the $baseBranch branch, rebase your PR onto the merge base with the $testBranch branch:"
echo " \`\`\`"
echo " git rebase --onto $prMergeBase $testMergeBase"
echo " git push --force-with-lease"
echo " \`\`\`"
log -e "\e[m"
exit 1
fi
done
log "Base branch is correct, no commits from development branches are included"


@ -349,8 +349,8 @@ let
nodePackages.prettier
];
inputs = basePackages ++ lib.optionals stdenv.isLinux [ inotify-tools ]
++ lib.optionals stdenv.isDarwin
inputs = basePackages ++ lib.optionals stdenv.hostPlatform.isLinux [ inotify-tools ]
++ lib.optionals stdenv.hostPlatform.isDarwin
(with darwin.apple_sdk.frameworks; [ CoreFoundation CoreServices ]);
# define shell startup command


@ -84,7 +84,7 @@ One advantage is that when `pkgs.zlib` is updated, it will automatically update
echo "================= /testing zlib using node ================="
'';
postPatch = pkgs.lib.optionalString pkgs.stdenv.isDarwin ''
postPatch = pkgs.lib.optionalString pkgs.stdenv.hostPlatform.isDarwin ''
substituteInPlace configure \
--replace-fail '/usr/bin/libtool' 'ar' \
--replace-fail 'AR="libtool"' 'AR="ar"' \


@ -125,8 +125,8 @@ On Darwin, if a script has too many `-Idir` flags in its first line (its “sheb
hash = "sha256-vOhB/FwQMC8PPvdnjDvxRpU6jAZcC6GMQfc0AH4uwKg=";
};
nativeBuildInputs = lib.optional stdenv.isDarwin shortenPerlShebang;
postInstall = lib.optionalString stdenv.isDarwin ''
nativeBuildInputs = lib.optional stdenv.hostPlatform.isDarwin shortenPerlShebang;
postInstall = lib.optionalString stdenv.hostPlatform.isDarwin ''
shortenPerlShebang $out/bin/exiftool
'';
};


@ -411,7 +411,7 @@ let
};
};
pythonEnv = testPython.withPackages (ps: [ ps.my-editable ]);
pythonEnv = myPython.withPackages (ps: [ ps.my-editable ]);
in pkgs.mkShell {
packages = [ pythonEnv ];
@ -1306,7 +1306,7 @@ for example:
] ++ lib.optionals (pythonAtLeast "3.8") [
# broken due to python3.8 async changes
"async"
] ++ lib.optionals stdenv.isDarwin [
] ++ lib.optionals stdenv.buildPlatform.isDarwin [
# can fail when building with other packages
"socket"
];


@ -22,7 +22,7 @@ Some common issues when packaging software for Darwin:
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
makeFlags = lib.optional stdenv.isDarwin "LDFLAGS=-Wl,-install_name,$(out)/lib/libfoo.dylib";
makeFlags = lib.optional stdenv.hostPlatform.isDarwin "LDFLAGS=-Wl,-install_name,$(out)/lib/libfoo.dylib";
}
```


@ -5,7 +5,7 @@
let
inherit (builtins) head length;
inherit (lib.trivial) isInOldestRelease mergeAttrs warn warnIf;
inherit (lib.trivial) oldestSupportedReleaseIsAtLeast mergeAttrs warn warnIf;
inherit (lib.strings) concatStringsSep concatMapStringsSep escapeNixIdentifier sanitizeDerivationName;
inherit (lib.lists) foldr foldl' concatMap elemAt all partition groupBy take foldl;
in
@ -2137,6 +2137,6 @@ rec {
"lib.zip is a deprecated alias of lib.zipAttrsWith." zipAttrsWith;
# DEPRECATED
cartesianProductOfSets = warnIf (isInOldestRelease 2405)
cartesianProductOfSets = warnIf (oldestSupportedReleaseIsAtLeast 2405)
"lib.cartesianProductOfSets is a deprecated alias of lib.cartesianProduct." cartesianProduct;
}


@ -73,7 +73,7 @@ let
inherit (self.trivial) id const pipe concat or and xor bitAnd bitOr bitXor
bitNot boolToString mergeAttrs flip mapNullable inNixShell isFloat min max
importJSON importTOML warn warnIf warnIfNot throwIf throwIfNot checkListOfEnum
info showWarnings nixpkgsVersion version isInOldestRelease
info showWarnings nixpkgsVersion version isInOldestRelease oldestSupportedReleaseIsAtLeast
mod compare splitByAndCompare seq deepSeq lessThan add sub
functionArgs setFunctionArgs isFunction toFunction mirrorFunctionArgs
fromHexString toHexString toBaseDigits inPureEvalMode isBool isInt pathExists


@ -670,7 +670,7 @@ lib.mapAttrs mkLicense ({
# Intel's license, seems free
iasl = {
spdxId = "Intel-ACPI";
fullName = "iASL";
fullName = "Intel ACPI Software License Agreement";
url = "https://old.calculate-linux.org/packages/licenses/iASL";
};
@ -889,7 +889,7 @@ lib.mapAttrs mkLicense ({
spdxId = "MIT";
fullName = "MIT License";
};
# https://spdx.org/licenses/MIT-feh.html
mit-feh = {
spdxId = "MIT-feh";
fullName = "feh License";
@ -1097,7 +1097,7 @@ lib.mapAttrs mkLicense ({
};
purdueBsd = {
fullName = " Purdue BSD-Style License"; # also know as lsof license
fullName = "Purdue BSD-Style License"; # also known as lsof license
url = "https://enterprise.dejacode.com/licenses/public/purdue-bsd";
};


@ -23,7 +23,7 @@ let
isAttrs
isBool
isFunction
isInOldestRelease
oldestSupportedReleaseIsAtLeast
isList
isString
length
@ -1030,7 +1030,7 @@ let
mkForce = mkOverride 50;
mkVMOverride = mkOverride 10; # used by nixos-rebuild build-vm
defaultPriority = warnIf (isInOldestRelease 2305) "lib.modules.defaultPriority is deprecated, please use lib.modules.defaultOverridePriority instead." defaultOverridePriority;
defaultPriority = warnIf (oldestSupportedReleaseIsAtLeast 2305) "lib.modules.defaultPriority is deprecated, please use lib.modules.defaultOverridePriority instead." defaultOverridePriority;
mkFixStrictness = warn "lib.mkFixStrictness has no effect and will be removed. It returns its argument unmodified, so you can just remove any calls." id;
@ -1146,8 +1146,8 @@ let
}: doRename {
inherit from to;
visible = false;
warn = isInOldestRelease sinceRelease;
use = warnIf (isInOldestRelease sinceRelease)
warn = oldestSupportedReleaseIsAtLeast sinceRelease;
use = warnIf (oldestSupportedReleaseIsAtLeast sinceRelease)
"Obsolete option `${showOption from}' is used. It was renamed to `${showOption to}'.";
};


@ -256,15 +256,15 @@ let
in {
pathType = lib.warnIf (lib.isInOldestRelease 2305)
pathType = lib.warnIf (lib.oldestSupportedReleaseIsAtLeast 2305)
"lib.sources.pathType has been moved to lib.filesystem.pathType."
lib.filesystem.pathType;
pathIsDirectory = lib.warnIf (lib.isInOldestRelease 2305)
pathIsDirectory = lib.warnIf (lib.oldestSupportedReleaseIsAtLeast 2305)
"lib.sources.pathIsDirectory has been moved to lib.filesystem.pathIsDirectory."
lib.filesystem.pathIsDirectory;
pathIsRegularFile = lib.warnIf (lib.isInOldestRelease 2305)
pathIsRegularFile = lib.warnIf (lib.oldestSupportedReleaseIsAtLeast 2305)
"lib.sources.pathIsRegularFile has been moved to lib.filesystem.pathIsRegularFile."
lib.filesystem.pathIsRegularFile;


@ -2272,7 +2272,7 @@ rec {
isCoercibleToString :: a -> bool
```
*/
isCoercibleToString = lib.warnIf (lib.isInOldestRelease 2305)
isCoercibleToString = lib.warnIf (lib.oldestSupportedReleaseIsAtLeast 2305)
"lib.strings.isCoercibleToString is deprecated in favor of either isStringLike or isConvertibleWithToString. Only use the latter if it needs to return true for null, numbers, booleans and list of similarly coercibles."
isConvertibleWithToString;


@ -26,6 +26,8 @@ rec {
cooperlake = [ "sse3" "ssse3" "sse4_1" "sse4_2" "aes" "avx" "avx2" "avx512" "fma" ];
tigerlake = [ "sse3" "ssse3" "sse4_1" "sse4_2" "aes" "avx" "avx2" "avx512" "fma" ];
alderlake = [ "sse3" "ssse3" "sse4_1" "sse4_2" "aes" "avx" "avx2" "fma" ];
sapphirerapids = [ "sse3" "ssse3" "sse4_1" "sse4_2" "aes" "avx" "avx2" "avx512" "fma" ];
emeraldrapids = [ "sse3" "ssse3" "sse4_1" "sse4_2" "aes" "avx" "avx2" "avx512" "fma" ];
# x86_64 AMD
btver1 = [ "sse3" "ssse3" "sse4_1" "sse4_2" ];
btver2 = [ "sse3" "ssse3" "sse4_1" "sse4_2" "aes" "avx" ];
@ -73,6 +75,8 @@ rec {
cascadelake = [ "cannonlake" ] ++ inferiors.cannonlake;
cooperlake = [ "cascadelake" ] ++ inferiors.cascadelake;
tigerlake = [ "icelake-server" ] ++ inferiors.icelake-server;
sapphirerapids = [ "tigerlake" ] ++ inferiors.tigerlake;
emeraldrapids = [ "sapphirerapids" ] ++ inferiors.sapphirerapids;
# CX16 does not exist on alderlake, while it does on nearly all other intel CPUs
alderlake = [ ];


@ -397,6 +397,15 @@ in {
Set it to the upcoming release, matching the nixpkgs/.version file.
*/
isInOldestRelease =
lib.warnIf (lib.oldestSupportedReleaseIsAtLeast 2411)
"lib.isInOldestRelease is deprecated. Use lib.oldestSupportedReleaseIsAtLeast instead."
lib.oldestSupportedReleaseIsAtLeast;
/**
Alias for `isInOldestRelease` introduced in 24.11.
Use `isInOldestRelease` in expressions outside of Nixpkgs for greater compatibility.
*/
oldestSupportedReleaseIsAtLeast =
release:
release <= lib.trivial.oldestSupportedRelease;


@ -905,6 +905,18 @@
githubId = 217050;
name = "Albert Chae";
};
albertodvp = {
email = "alberto.fanton@protonmail.com";
github = "albertodvp";
githubId = 16022854;
matrix = "@albertodvp:matrix.org";
name = "Alberto Fanton";
keys = [
{
fingerprint = "63FD 3A4F 4832 946C B808 8E3C C852 4052 69E7 A087";
}
];
};
aldoborrero = {
email = "aldoborrero+nixos@pm.me";
github = "aldoborrero";
@ -8832,6 +8844,13 @@
name = "Luna Perego";
keys = [ { fingerprint = "09E4 B981 9B93 5B0C 0B91 1274 0578 7332 9217 08FF"; } ];
};
hustlerone = {
email = "nine-ball@tutanota.com";
matrix = "@hustlerone:matrix.org";
github = "hustlerone";
name = "Hustler One";
githubId = 167621692;
};
huyngo = {
email = "huyngo@disroot.org";
github = "Huy-Ngo";
@ -9470,6 +9489,13 @@
githubId = 7558482;
name = "Jack Gerrits";
};
jacobkoziej = {
name = "Jacob Koziej";
email = "jacobkoziej@gmail.com";
github = "jacobkoziej";
githubId = 45084216;
keys = [ { fingerprint = "1BF9 8D10 E0D0 0B41 5723 5836 4C13 3A84 E646 9228"; } ];
};
jaduff = {
email = "jdduffpublic@proton.me";
github = "jaduff";
@ -11192,7 +11218,7 @@
name = "kintrix";
};
kinzoku = {
email = "kinzokudev4869@gmail.com";
email = "kinzoku@the-nebula.xyz";
github = "kinzoku-dev";
githubId = 140647311;
name = "Ayman Hamza";
@ -12572,6 +12598,12 @@
githubId = 2486026;
name = "Luca Fulchir";
};
lukts30 = {
email = "llukas21307@gmail.com";
github = "lukts30";
githubId = 24390575;
name = "lukts30";
};
luleyleo = {
email = "git@leopoldluley.de";
github = "luleyleo";
@ -12683,6 +12715,12 @@
githubId = 3044438;
name = "Lucas Savva";
};
m1dugh = {
email = "romain103paris@gmail.com";
name = "Romain LE MIERE";
github = "m1dugh";
githubId = 42266017;
};
ma27 = {
email = "maximilian@mbosch.me";
matrix = "@ma27:nicht-so.sexy";
@ -13885,6 +13923,13 @@
name = "Mark Vainomaa";
keys = [ { fingerprint = "DB43 2895 CF68 F0CE D4B7 EF60 DA01 5B05 B5A1 1B22"; } ];
};
mikut = {
email = "mikut@mikut.dev";
github = "Mikutut";
githubId = 65046942;
name = "Marcin Mikuła";
keys = [ { fingerprint = "5547 2A56 AC30 69C9 15C8 B98D 997F 71FA 1D74 6E37"; } ];
};
milahu = {
email = "milahu@gmail.com";
github = "milahu";
@ -20755,6 +20800,12 @@
githubId = 18656090;
name = "Yuki Takagi";
};
takeda = {
name = "Derek Kuliński";
email = "d@kulinski.us";
github = "takeda";
githubId = 411978;
};
taketwo = {
email = "alexandrov88@gmail.com";
github = "taketwo";


@ -252,7 +252,7 @@ In addition to numerous new and updated packages, this release has the following
}
```
- The default module options for [services.snapserver.openFirewall](#opt-services.snapserver.openFirewall), [services.tmate-ssh-server.openFirewall](#opt-services.tmate-ssh-server.openFirewall) and [services.unifi-video.openFirewall](#opt-services.unifi-video.openFirewall) have been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
- The default module options for [services.snapserver.openFirewall](#opt-services.snapserver.openFirewall), [services.tmate-ssh-server.openFirewall](#opt-services.tmate-ssh-server.openFirewall) and `services.unifi-video.openFirewall` have been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
- The option `i18n.inputMethod.fcitx5.enableRimeData` has been removed. Default RIME data is now included in `fcitx5-rime` by default, and can be customized using


@ -54,15 +54,23 @@
was added through the `boot.initrd.systemd.dmVerity` option.
- The [Xen Project Hypervisor](https://xenproject.org) is once again available as a virtualisation option under [`virtualisation.xen`](#opt-virtualisation.xen.enable).
- This release includes Xen [4.17.5](https://wiki.xenproject.org/wiki/Xen_Project_4.17_Release_Notes), [4.18.3](https://wiki.xenproject.org/wiki/Xen_Project_4.18_Release_Notes) and [4.19.0](https://wiki.xenproject.org/wiki/Xen_Project_4.19_Release_Notes), as well as support for booting the hypervisor on EFI systems.
- This release includes Xen [4.19.0](https://wiki.xenproject.org/wiki/Xen_Project_4.19_Release_Notes) and support for booting the hypervisor on EFI systems.
::: {.warning}
Booting into the Xen Project Hypervisor through a legacy BIOS bootloader or with the legacy script-based Stage 1 initrd have been **deprecated**. Only EFI booting and the new systemd-based Stage 1 initrd are supported.
:::
- There are two flavours of Xen available by default: `xen`, which includes all built-in components, and `xen-slim`, which replaces the built-in components with their Nixpkgs equivalents.
- The `qemu-xen-traditional` component has been deprecated by the upstream Xen Project, and is no longer available in any of the Xen Project Hypervisor packages.
- The `qemu-xen-traditional` component has been deprecated by the upstream Xen Project, and is no longer included in the Xen build.
- The OCaml-based Xen Store can now be configured using [`virtualisation.xen.store.settings`](#opt-virtualisation.xen.store.settings).
- The `virtualisation.xen.bridge` options have been deprecated in this release cycle. Users who need network bridges are encouraged to set up their own networking configurations.
- A new option [`systemd.enableStrictShellChecks`](#opt-systemd.enableStrictShellChecks) has been added. When enabled, all systemd scripts generated by NixOS will
be checked with [shellcheck](https://www.shellcheck.net) and any errors or warnings will cause the build to fail.
This affects all scripts that have been created through the `script`, `reload`, `preStart`, `postStart`, `preStop` and `postStop` options for systemd services.
This does not affect command lines passed directly to `ExecStart`, `ExecReload`, `ExecStartPre`, `ExecStartPost`, `ExecStop` or `ExecStopPost`.
It therefore also does not affect systemd units that come from packages and are not defined through the NixOS config.
This option is disabled by default, and although some services have already been fixed, it is still likely that you will encounter build failures when enabling this.
We encourage people to enable this option when they are willing and able to submit fixes for potential build failures to nixpkgs.
The option can also be enabled or disabled for individual services using the `enableStrictShellChecks` option on the service itself, which will take precedence over the global setting.
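As an illustration, a configuration might combine the global and per-service options like this (a minimal sketch; the service name `example` is hypothetical):

```nix
{
  # Run shellcheck on all generated systemd unit scripts at build time.
  systemd.enableStrictShellChecks = true;

  # Opt a single service out again; this takes precedence over the global setting.
  systemd.services.example.enableStrictShellChecks = false;
}
```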
## New Modules {#sec-release-24.11-new-modules}
- [TaskChampion Sync-Server](https://github.com/GothenburgBitFactory/taskchampion-sync-server), a [Taskwarrior 3](https://taskwarrior.org/docs/upgrade-3/) sync server, replacing Taskwarrior 2's sync server named [`taskserver`](https://github.com/GothenburgBitFactory/taskserver).
@ -187,6 +195,12 @@
- `transmission-gtk`: `~/.config/transmission`
- `transmission-daemon` using NixOS module: `${config.services.transmission.home}/.config/transmission-daemon` (defaults to `/var/lib/transmission/.config/transmission-daemon`)
- The default `mongodb` version has been updated from 5.0 to 7.0.
For more information, see the compatibility changes for MongoDB [6.0](https://www.mongodb.com/docs/manual/release-notes/6.0-compatibility/) and [7.0](https://www.mongodb.com/docs/manual/release-notes/7.0-compatibility/).
- `unifi` has been updated to UniFi 8.
`unifi7` was removed as it is vulnerable to CVE-2024-42025 and required a version of MongoDB that has reached end of life.
- `androidenv.androidPkgs_9_0` has been removed, and replaced with `androidenv.androidPkgs` for a more complete Android SDK including support for Android 9 and later.
- `grafana` has been updated to version 11.1. This version doesn't support setting `http_addr` to a hostname anymore; an IP address is expected.
@ -213,6 +227,8 @@
- `buildbot` was updated to 4.0, the AngularJS frontend has been replaced by a React frontend, see the [upstream release notes](https://docs.buildbot.net/current/manual/upgrading/4.0-upgrade.html).
- `headscale` has been updated to version 0.23.0 which reworked large parts of the configuration including DNS, Magic DNS prefixes and ACL policy files. See the [upstream changelog](https://github.com/juanfont/headscale/releases/tag/v0.23.0) for details.
- `nginx` package no longer includes `gd` and `geoip` dependencies. For enabling it, override `nginx` package with the optionals `withImageFilter` and `withGeoIP`.
- `systemd.enableUnifiedCgroupHierarchy` option has been removed.
@ -484,8 +500,13 @@
- `ffmpeg_5` has been removed. Please use the unversioned `ffmpeg`,
pin a newer version, or if necessary pin `ffmpeg_4` for compatibility.
- The `rss-bridge` service drops support for loading a configuration file from `${config.services.rss-bridge.dataDir}/config.ini.php`.
Consider using the `services.rss-bridge.config` option instead.
- The `xdg.portal.gtkUsePortal` option has been removed, as it had been deprecated for over 2 years. Using the `GTK_USE_PORTAL` environment variable in this manner is not intended nor encouraged by the GTK developers, but can still be done manually via `environment.sessionVariables`.
- Support for the legacy CUPS browsing and LDAP protocols has been removed from `services.printing`. If `cups` or `ldap` are in the `BrowseRemoteProtocols` setting in `services.printing.browsedConf`, they need to be removed.
- The `services.trust-dns` module has been renamed to `services.hickory-dns`.
- The option `services.prometheus.exporters.pgbouncer.connectionStringFile` has been removed since
@ -496,6 +517,8 @@
- The `lsh` package and the `services.lshd` module have been removed as they had no maintainer in Nixpkgs and hadn't seen an upstream release in over a decade. It is recommended to migrate to `openssh` and `services.openssh`.
- `ceph` has been upgraded to v19. See the [Ceph "squid" release notes](https://docs.ceph.com/en/latest/releases/squid/#v19-2-0-squid) for details and recommended upgrade procedure.
- `opencv2` and `opencv3` have been removed, as they are obsolete and
were not used by any other package. External users are encouraged to
migrate to OpenCV 4.
@ -533,6 +556,8 @@
- Compatible string matching for `hardware.deviceTree.overlays` has been changed to a more correct behavior. See [below](#sec-release-24.11-migration-dto-compatible) for details.
- The `rustic` package was upgraded to `0.9.0`, which contains [breaking changes to the config file format](https://github.com/rustic-rs/rustic/releases/tag/v0.9.0).
## Other Notable Changes {#sec-release-24.11-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -565,6 +590,8 @@
- `nixosTests` now provide a working IPv6 setup for VLAN 1 by default.
- `services.dhcpcd` is now started with additional systemd sandbox/hardening options for better security. When using `networking.dhcpcd.runHook` these settings are not applied.
- Kanidm can now be provisioned using the new [`services.kanidm.provision`] option, but requires using a patched version available via `pkgs.kanidm.withSecretProvisioning`; a configuration sketch follows this list.
- Kanidm previously had an incorrect systemd service type, causing dependent units with an `after` and `requires` directive to start before `kanidm*` finished startup. The module has now been updated in line with upstream recommendations.
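
A minimal sketch of the provisioning setup mentioned above, assuming the module's `enableServer` and `provision.enable` switches (illustrative, not taken from the module documentation):

```nix
{ pkgs, ... }:
{
  services.kanidm = {
    enableServer = true;                          # assumed module switch
    # Provisioning requires the patched package variant.
    package = pkgs.kanidm.withSecretProvisioning;
    provision.enable = true;
    # Groups, persons and OAuth2 clients would be declared under
    # services.kanidm.provision; see the module options for the exact schema.
  };
}
```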


@ -386,18 +386,27 @@ in rec {
''}
''; # */
makeJobScript = name: text:
makeJobScript = { name, text, enableStrictShellChecks }:
let
scriptName = replaceStrings [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
out = (pkgs.writeShellScriptBin scriptName ''
set -e
${text}
'').overrideAttrs (_: {
out = (
if ! enableStrictShellChecks then
pkgs.writeShellScriptBin scriptName ''
set -e
${text}
''
else
pkgs.writeShellApplication {
name = scriptName;
inherit text;
}
).overrideAttrs (_: {
# The derivation name is different from the script file name
# to keep the script file name short to avoid cluttering logs.
name = "unit-script-${scriptName}";
});
in "${out}/bin/${scriptName}";
in lib.getExe out;
unitConfig = { config, name, options, ... }: {
config = {
@ -448,10 +457,16 @@ in rec {
};
};
serviceConfig = { name, config, ... }: {
serviceConfig =
let
nixosConfig = config;
in
{ name, lib, config, ... }: {
config = {
name = "${name}.service";
environment.PATH = mkIf (config.path != []) "${makeBinPath config.path}:${makeSearchPathOutput "bin" "sbin" config.path}";
enableStrictShellChecks = lib.mkOptionDefault nixosConfig.systemd.enableStrictShellChecks;
};
};


@ -17,6 +17,7 @@ let
concatMap
filterOverrides
isList
literalExpression
mergeEqualOption
mkIf
mkMerge
@ -357,6 +358,14 @@ in rec {
'';
};
enableStrictShellChecks = mkOption {
type = types.bool;
description = "Enable running shellcheck on the generated scripts for this unit.";
# The default gets set in systemd-lib.nix because we don't have access to
# the full NixOS config here.
defaultText = literalExpression "config.systemd.enableStrictShellChecks";
};
script = mkOption {
type = types.lines;
default = "";
@ -428,27 +437,51 @@ in rec {
config = mkMerge [
(mkIf (config.preStart != "") rec {
jobScripts = makeJobScript "${name}-pre-start" config.preStart;
jobScripts = makeJobScript {
name = "${name}-pre-start";
text = config.preStart;
inherit (config) enableStrictShellChecks;
};
serviceConfig.ExecStartPre = [ jobScripts ];
})
(mkIf (config.script != "") rec {
jobScripts = makeJobScript "${name}-start" config.script;
jobScripts = makeJobScript {
name = "${name}-start";
text = config.script;
inherit (config) enableStrictShellChecks;
};
serviceConfig.ExecStart = jobScripts + " " + config.scriptArgs;
})
(mkIf (config.postStart != "") rec {
jobScripts = (makeJobScript "${name}-post-start" config.postStart);
jobScripts = makeJobScript {
name = "${name}-post-start";
text = config.postStart;
inherit (config) enableStrictShellChecks;
};
serviceConfig.ExecStartPost = [ jobScripts ];
})
(mkIf (config.reload != "") rec {
jobScripts = makeJobScript "${name}-reload" config.reload;
jobScripts = makeJobScript {
name = "${name}-reload";
text = config.reload;
inherit (config) enableStrictShellChecks;
};
serviceConfig.ExecReload = jobScripts;
})
(mkIf (config.preStop != "") rec {
jobScripts = makeJobScript "${name}-pre-stop" config.preStop;
jobScripts = makeJobScript {
name = "${name}-pre-stop";
text = config.preStop;
inherit (config) enableStrictShellChecks;
};
serviceConfig.ExecStop = jobScripts;
})
(mkIf (config.postStop != "") rec {
jobScripts = makeJobScript "${name}-post-stop" config.postStop;
jobScripts = makeJobScript {
name = "${name}-post-stop";
text = config.postStop;
inherit (config) enableStrictShellChecks;
};
serviceConfig.ExecStopPost = jobScripts;
})
];


@ -151,7 +151,7 @@ in
nodesCompat =
mapAttrs
(name: config: config // {
config = lib.warnIf (lib.isInOldestRelease 2211)
config = lib.warnIf (lib.oldestSupportedReleaseIsAtLeast 2211)
"Module argument `nodes.${name}.config` is deprecated. Use `nodes.${name}` instead."
config;
})


@ -1,8 +1,9 @@
{ config, lib, pkgs, ... }:
{
options.hardware.usbStorage.manageStartStop = lib.mkOption {
options.hardware.usbStorage.manageShutdown = lib.mkOption {
type = lib.types.bool;
default = true;
default = false;
description = ''
Enable this option to gracefully spin-down external storage during shutdown.
If you suspect improper head parking after poweroff, install `smartmontools` and check
@ -10,9 +11,11 @@
'';
};
config = lib.mkIf config.hardware.usbStorage.manageStartStop {
config = lib.mkIf config.hardware.usbStorage.manageShutdown {
services.udev.extraRules = ''
ACTION=="add|change", SUBSYSTEM=="scsi_disk", DRIVERS=="usb-storage", ATTR{manage_system_start_stop}="1"
ACTION=="add|change", SUBSYSTEM=="scsi_disk", DRIVERS=="usb-storage|uas", ATTR{manage_shutdown}="1"
'';
};
imports = [(lib.mkRenamedOptionModule [ "hardware" "usbStorage" "manageStartStop" ] [ "hardware" "usbStorage" "manageShutdown" ])];
}
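
Under the renamed option, opting back in to the shutdown handling would look like this in a NixOS configuration (a minimal sketch):

```nix
{
  # Gracefully spin down external USB/UAS storage at shutdown; the behaviour
  # is now disabled by default.
  hardware.usbStorage.manageShutdown = true;
}
```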


@ -1231,6 +1231,7 @@
./services/networking/syncthing.nix
./services/networking/tailscale.nix
./services/networking/tailscale-auth.nix
./services/networking/tailscale-derper.nix
./services/networking/tayga.nix
./services/networking/tcpcrypt.nix
./services/networking/teamspeak3.nix
@ -1375,7 +1376,6 @@
./services/video/mirakurun.nix
./services/video/photonvision.nix
./services/video/mediamtx.nix
./services/video/unifi-video.nix
./services/video/v4l2-relayd.nix
./services/wayland/cage.nix
./services/wayland/hypridle.nix


@ -105,13 +105,7 @@ in
# The linux builder is a lightweight VM for remote building; not evaluation.
nix.channel.enable = false;
# remote builder uses `nix-daemon` (ssh-ng:) or `nix-store --serve` (ssh:)
# --force: do not complain when missing
# TODO: install a store-only nix
# https://github.com/NixOS/rfcs/blob/master/rfcs/0134-nix-store-layer.md#detailed-design
environment.extraSetup = ''
rm --force $out/bin/{nix-instantiate,nix-build,nix-shell,nix-prefetch*,nix}
'';
# Deployment is by image.
# TODO system.switch.enable = false;?
system.disableInstallerTools = true;


@ -74,19 +74,13 @@ in {
wantedBy = [ "basic.target" ];
serviceConfig.Type = "oneshot";
script = ''
umask u=rw
nncpCfgDir=$(mktemp --directory nncp.XXX)
for f in ${jsonCfgFile} ${builtins.toString config.programs.nncp.secrets}; do
tmpdir=$(mktemp --directory nncp.XXX)
nncp-cfgdir -cfg $f -dump $tmpdir
find $tmpdir -size 1c -delete
cp -a $tmpdir/* $nncpCfgDir/
rm -rf $tmpdir
done
nncp-cfgdir -load $nncpCfgDir > ${nncpCfgFile}
rm -rf $nncpCfgDir
umask 127
rm -f ${nncpCfgFile}
for f in ${jsonCfgFile} ${builtins.toString config.programs.nncp.secrets}
do
${lib.getExe pkgs.hjson-go} -c <"$f"
done |${lib.getExe pkgs.jq} --slurp add >${nncpCfgFile}
chgrp ${programCfg.group} ${nncpCfgFile}
chmod g+r ${nncpCfgFile}
'';
};
};


@ -108,6 +108,7 @@ in
used instead.
'')
(mkRemovedOptionModule [ "services" "tvheadend" ] "The tvheadend package and the corresponding module have been removed as nobody was willing to maintain them and they were stuck on an unmaintained version that required FFmpeg 4; please see https://github.com/NixOS/nixpkgs/pull/332259 if you are interested in maintaining a newer version.")
(mkRemovedOptionModule [ "services" "unifi-video" ] "The unifi-video package and the corresponding module have been removed as the software has been unsupported since 2021 and requires a MongoDB version that has reached end of life.")
(mkRemovedOptionModule [ "services" "venus" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "wakeonlan"] "This module was removed in favor of enabling it with networking.interfaces.<name>.wakeOnLan")
(mkRemovedOptionModule [ "services" "winstone" ] "The corresponding package was removed from nixpkgs.")


@ -44,10 +44,9 @@ in {
telephony-service
teleports
]);
variables = {
# To override the keyboard layouts in Lomiri
NIXOS_XKB_LAYOUTS = config.services.xserver.xkb.layout;
};
# To override the default keyboard layout in Lomiri
etc.${pkgs.lomiri.lomiri.passthru.etcLayoutsFile}.text = lib.strings.replaceStrings [","] ["\n"] config.services.xserver.xkb.layout;
};
hardware = {


@ -207,14 +207,14 @@ in
config = lib.mkIf enableDHCP {
assertions = [ {
# dhcpcd doesn't start properly with malloc ∉ [ libc scudo ]
# dhcpcd doesn't start properly with malloc ∉ [ jemalloc libc mimalloc scudo ]
# see https://github.com/NixOS/nixpkgs/issues/151696
assertion =
dhcpcd.enablePrivSep
-> lib.elem config.environment.memoryAllocator.provider [ "libc" "scudo" ];
-> lib.elem config.environment.memoryAllocator.provider [ "jemalloc" "libc" "mimalloc" "scudo" ];
message = ''
dhcpcd with privilege separation is incompatible with chosen system malloc.
Currently only the `libc` and `scudo` allocators are known to work.
Currently `graphene-hardened` allocator is known to be broken.
To disable dhcpcd's privilege separation, overlay Nixpkgs and override dhcpcd
to set `enablePrivSep = false`.
'';
@ -251,6 +251,39 @@ in
ExecStart = "@${dhcpcd}/sbin/dhcpcd dhcpcd --quiet ${lib.optionalString cfg.persistent "--persistent"} --config ${dhcpcdConf}";
ExecReload = "${dhcpcd}/sbin/dhcpcd --rebind";
Restart = "always";
} // lib.optionalAttrs (cfg.runHook == "") {
# Proc filesystem
ProcSubset = "all";
ProtectProc = "invisible";
# Access write directories
UMask = "0027";
# Capabilities
CapabilityBoundingSet = [ "CAP_NET_ADMIN" "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" "CAP_SETGID" "CAP_SETUID" "CAP_SYS_CHROOT" ];
# Security
NoNewPrivileges = true;
# Sandboxing
ProtectSystem = true;
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
PrivateUsers = false;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = false;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" "AF_PACKET" ];
RestrictNamespaces = true;
LockPersonality = true;
MemoryDenyWriteExecute = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RemoveIPC = true;
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources" "chroot" "gettid" "setgroups" "setuid" ];
};
};


@ -3,25 +3,33 @@
lib,
pkgs,
...
}:
with lib; let
}: let
cfg = config.services.headscale;
dataDir = "/var/lib/headscale";
runDir = "/run/headscale";
cliConfig = {
# Turn off update checks since the origin of our package
# is nixpkgs and not Github.
disable_check_updates = true;
unix_socket = "${runDir}/headscale.sock";
};
settingsFormat = pkgs.formats.yaml {};
configFile = settingsFormat.generate "headscale.yaml" cfg.settings;
cliConfigFile = settingsFormat.generate "headscale.yaml" cliConfig;
in {
options = {
services.headscale = {
enable = mkEnableOption "headscale, Open Source coordination server for Tailscale";
enable = lib.mkEnableOption "headscale, Open Source coordination server for Tailscale";
package = mkPackageOption pkgs "headscale" { };
package = lib.mkPackageOption pkgs "headscale" {};
user = mkOption {
user = lib.mkOption {
default = "headscale";
type = types.str;
type = lib.types.str;
description = ''
User account under which headscale runs.
@ -33,9 +41,9 @@ in {
'';
};
group = mkOption {
group = lib.mkOption {
default = "headscale";
type = types.str;
type = lib.types.str;
description = ''
Group under which headscale runs.
@ -47,8 +55,8 @@ in {
'';
};
address = mkOption {
type = types.str;
address = lib.mkOption {
type = lib.types.str;
default = "127.0.0.1";
description = ''
Listening address of headscale.
@ -56,8 +64,8 @@ in {
example = "0.0.0.0";
};
port = mkOption {
type = types.port;
port = lib.mkOption {
type = lib.types.port;
default = 8080;
description = ''
Listening port of headscale.
@ -65,18 +73,33 @@ in {
example = 443;
};
settings = mkOption {
settings = lib.mkOption {
description = ''
Overrides to {file}`config.yaml` as a Nix attribute set.
Check the [example config](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
for possible options.
'';
type = types.submodule {
type = lib.types.submodule {
freeformType = settingsFormat.type;
imports = with lib; [
(mkAliasOptionModule ["acl_policy_path"] ["policy" "path"])
(mkAliasOptionModule ["db_host"] ["database" "postgres" "host"])
(mkAliasOptionModule ["db_name"] ["database" "postgres" "name"])
(mkAliasOptionModule ["db_password_file"] ["database" "postgres" "password_file"])
(mkAliasOptionModule ["db_path"] ["database" "sqlite" "path"])
(mkAliasOptionModule ["db_port"] ["database" "postgres" "port"])
(mkAliasOptionModule ["db_type"] ["database" "type"])
(mkAliasOptionModule ["db_user"] ["database" "postgres" "user"])
(mkAliasOptionModule ["dns_config" "base_domain"] ["dns" "base_domain"])
(mkAliasOptionModule ["dns_config" "domains"] ["dns" "search_domains"])
(mkAliasOptionModule ["dns_config" "magic_dns"] ["dns" "magic_dns"])
(mkAliasOptionModule ["dns_config" "nameservers"] ["dns" "nameservers" "global"])
];
options = {
server_url = mkOption {
type = types.str;
server_url = lib.mkOption {
type = lib.types.str;
default = "http://127.0.0.1:8080";
description = ''
The url clients will connect to.
@ -84,25 +107,49 @@ in {
example = "https://myheadscale.example.com:443";
};
private_key_path = mkOption {
type = types.path;
default = "${dataDir}/private.key";
description = ''
Path to private key file, generated automatically if it does not exist.
'';
};
noise.private_key_path = mkOption {
type = types.path;
noise.private_key_path = lib.mkOption {
type = lib.types.path;
default = "${dataDir}/noise_private.key";
description = ''
Path to noise private key file, generated automatically if it does not exist.
'';
};
prefixes = let
prefDesc = ''
Each prefix consists of either an IPv4 or IPv6 address,
and the associated prefix length, delimited by a slash.
It must be within IP ranges supported by the Tailscale
client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
'';
in {
v4 = lib.mkOption {
type = lib.types.str;
default = "100.64.0.0/10";
description = prefDesc;
};
v6 = lib.mkOption {
type = lib.types.str;
default = "fd7a:115c:a1e0::/48";
description = prefDesc;
};
allocation = lib.mkOption {
type = lib.types.enum ["sequential" "random"];
example = "random";
default = "sequential";
description = ''
Strategy used for allocation of IPs to nodes, available options:
- sequential (default): assigns the next free IP from the previous given IP.
- random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
'';
};
};
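As a sketch, the prefix options above could keep the default ranges while switching to random allocation:

services.headscale.settings.prefixes = {
  v4 = "100.64.0.0/10";
  v6 = "fd7a:115c:a1e0::/48";
  allocation = "random";
};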
derp = {
urls = mkOption {
type = types.listOf types.str;
urls = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = ["https://controlplane.tailscale.com/derpmap/default"];
description = ''
List of urls containing DERP maps.
@ -110,8 +157,8 @@ in {
'';
};
paths = mkOption {
type = types.listOf types.path;
paths = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [];
description = ''
List of file paths containing DERP maps.
@ -119,8 +166,8 @@ in {
'';
};
auto_update_enable = mkOption {
type = types.bool;
auto_update_enable = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to automatically update DERP maps on a set frequency.
@ -128,18 +175,26 @@ in {
example = false;
};
update_frequency = mkOption {
type = types.str;
update_frequency = lib.mkOption {
type = lib.types.str;
default = "24h";
description = ''
Frequency to update DERP maps.
'';
example = "5m";
};
server.private_key_path = lib.mkOption {
type = lib.types.path;
default = "${dataDir}/derp_server_private.key";
description = ''
Path to derp private key file, generated automatically if it does not exist.
'';
};
};
ephemeral_node_inactivity_timeout = mkOption {
type = types.str;
ephemeral_node_inactivity_timeout = lib.mkOption {
type = lib.types.str;
default = "30m";
description = ''
Time before an inactive ephemeral node is deleted.
@ -147,104 +202,100 @@ in {
example = "5m";
};
db_type = mkOption {
type = types.enum ["sqlite3" "postgres"];
example = "postgres";
default = "sqlite3";
description = "Database engine to use.";
};
db_host = mkOption {
type = types.nullOr types.str;
default = null;
example = "127.0.0.1";
description = "Database host address.";
};
db_port = mkOption {
type = types.nullOr types.port;
default = null;
example = 3306;
description = "Database host port.";
};
db_name = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = "Database name.";
};
db_user = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = "Database user.";
};
db_password_file = mkOption {
type = types.nullOr types.path;
default = null;
example = "/run/keys/headscale-dbpassword";
description = ''
A file containing the password corresponding to
{option}`database.user`.
'';
};
db_path = mkOption {
type = types.nullOr types.str;
default = "${dataDir}/db.sqlite";
description = "Path to the sqlite3 database file.";
};
log.level = mkOption {
type = types.str;
default = "info";
description = ''
headscale log level.
'';
example = "debug";
};
log.format = mkOption {
type = types.str;
default = "text";
description = ''
headscale log format.
'';
example = "json";
};
dns_config = {
nameservers = mkOption {
type = types.listOf types.str;
default = ["1.1.1.1"];
database = {
type = lib.mkOption {
type = lib.types.enum ["sqlite" "sqlite3" "postgres"];
example = "postgres";
default = "sqlite";
description = ''
List of nameservers to pass to Tailscale clients.
Database engine to use.
Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
All new development, testing and optimisations are done with SQLite in mind.
'';
};
override_local_dns = mkOption {
type = types.bool;
default = false;
description = ''
Whether to use [Override local DNS](https://tailscale.com/kb/1054/dns/).
'';
example = true;
sqlite = {
path = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = "${dataDir}/db.sqlite";
description = "Path to the sqlite3 database file.";
};
write_ahead_log = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Enable WAL mode for SQLite. This is recommended for production environments.
https://www.sqlite.org/wal.html
'';
example = true;
};
};
domains = mkOption {
type = types.listOf types.str;
default = [];
postgres = {
host = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "127.0.0.1";
description = "Database host address.";
};
port = lib.mkOption {
type = lib.types.nullOr lib.types.port;
default = null;
example = 3306;
description = "Database host port.";
};
name = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "headscale";
description = "Database name.";
};
user = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "headscale";
description = "Database user.";
};
password_file = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/run/keys/headscale-dbpassword";
description = ''
A file containing the password corresponding to
{option}`database.user`.
'';
};
};
};
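For illustration, the nested database settings introduced above might be filled in like this for PostgreSQL; host, credentials and the secret path are placeholders:

services.headscale.settings.database = {
  type = "postgres";
  postgres = {
    host = "127.0.0.1";
    port = 5432;
    name = "headscale";
    user = "headscale";
    password_file = "/run/keys/headscale-db-password"; # hypothetical secret path
  };
};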
log = {
level = lib.mkOption {
type = lib.types.str;
default = "info";
description = ''
Search domains to inject to Tailscale clients.
headscale log level.
'';
example = ["mydomain.internal"];
example = "debug";
};
magic_dns = mkOption {
type = types.bool;
format = lib.mkOption {
type = lib.types.str;
default = "text";
description = ''
headscale log format.
'';
example = "json";
};
};
dns = {
magic_dns = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
@ -253,8 +304,8 @@ in {
example = false;
};
base_domain = mkOption {
type = types.str;
base_domain = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
Defines the base domain to create the hostnames for MagicDNS.
@ -264,11 +315,30 @@ in {
`myhost.mynamespace.example.com`).
'';
};
nameservers = {
global = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
description = ''
List of nameservers to pass to Tailscale clients.
'';
};
};
search_domains = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
description = ''
Search domains to inject to Tailscale clients.
'';
example = ["mydomain.internal"];
};
};
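A short sketch of the reorganised DNS settings, with placeholder domains and nameservers:

services.headscale.settings.dns = {
  magic_dns = true;
  base_domain = "ts.example.com";            # placeholder
  nameservers.global = [ "1.1.1.1" ];
  search_domains = [ "mydomain.internal" ];
};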
oidc = {
issuer = mkOption {
type = types.str;
issuer = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
URL to OpenID issuer.
@ -276,33 +346,33 @@ in {
example = "https://openid.example.com";
};
client_id = mkOption {
type = types.str;
client_id = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
OpenID Connect client ID.
'';
};
client_secret_path = mkOption {
type = types.nullOr types.str;
client_secret_path = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
Path to OpenID Connect client secret file. Expands environment variables in format ''${VAR}.
'';
};
scope = mkOption {
type = types.listOf types.str;
scope = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = ["openid" "profile" "email"];
description = ''
Scopes used in the OIDC flow.
'';
};
extra_params = mkOption {
type = types.attrsOf types.str;
default = { };
extra_params = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = {};
description = ''
Custom query parameters to send with the Authorize Endpoint request.
'';
@ -311,27 +381,27 @@ in {
};
};
allowed_domains = mkOption {
type = types.listOf types.str;
default = [ ];
allowed_domains = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
description = ''
Allowed principal domains. If an authenticated user's domain
is not in this list, the authentication request will be rejected.
'';
example = [ "example.com" ];
example = ["example.com"];
};
allowed_users = mkOption {
type = types.listOf types.str;
default = [ ];
allowed_users = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
description = ''
Users allowed to authenticate even if not in allowedDomains.
'';
example = [ "alice@example.com" ];
example = ["alice@example.com"];
};
strip_email_domain = mkOption {
type = types.bool;
strip_email_domain = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether the domain part of the email address should be removed when generating namespaces.
@ -339,16 +409,16 @@ in {
};
};
tls_letsencrypt_hostname = mkOption {
type = types.nullOr types.str;
tls_letsencrypt_hostname = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = "";
description = ''
Domain name to request a TLS certificate for.
'';
};
tls_letsencrypt_challenge_type = mkOption {
type = types.enum ["TLS-ALPN-01" "HTTP-01"];
tls_letsencrypt_challenge_type = lib.mkOption {
type = lib.types.enum ["TLS-ALPN-01" "HTTP-01"];
default = "HTTP-01";
description = ''
Type of ACME challenge to use, currently supported types:
@ -356,8 +426,8 @@ in {
'';
};
tls_letsencrypt_listen = mkOption {
type = types.nullOr types.str;
tls_letsencrypt_listen = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = ":http";
description = ''
When HTTP-01 challenge is chosen, letsencrypt must set up a
@ -366,28 +436,40 @@ in {
'';
};
tls_cert_path = mkOption {
type = types.nullOr types.path;
tls_cert_path = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = ''
Path to already created certificate.
'';
};
tls_key_path = mkOption {
type = types.nullOr types.path;
tls_key_path = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = ''
Path to key for already created certificate.
'';
};
acl_policy_path = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Path to a file containing ACL policies.
'';
policy = {
mode = lib.mkOption {
type = lib.types.enum ["file" "database"];
default = "file";
description = ''
The mode can be "file" or "database" that defines
where the ACL policies are stored and read from.
'';
};
path = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = ''
If the mode is set to "file", the path to a
HuJSON file containing ACL policies.
'';
};
};
};
};
@ -395,67 +477,49 @@ in {
};
};
imports = [
# TODO address + port = listen_addr
(mkRenamedOptionModule ["services" "headscale" "serverUrl"] ["services" "headscale" "settings" "server_url"])
(mkRenamedOptionModule ["services" "headscale" "privateKeyFile"] ["services" "headscale" "settings" "private_key_path"])
(mkRenamedOptionModule ["services" "headscale" "derp" "urls"] ["services" "headscale" "settings" "derp" "urls"])
(mkRenamedOptionModule ["services" "headscale" "derp" "paths"] ["services" "headscale" "settings" "derp" "paths"])
imports = with lib; [
(mkRenamedOptionModule ["services" "headscale" "derp" "autoUpdate"] ["services" "headscale" "settings" "derp" "auto_update_enable"])
(mkRenamedOptionModule ["services" "headscale" "derp" "paths"] ["services" "headscale" "settings" "derp" "paths"])
(mkRenamedOptionModule ["services" "headscale" "derp" "updateFrequency"] ["services" "headscale" "settings" "derp" "update_frequency"])
(mkRenamedOptionModule ["services" "headscale" "derp" "urls"] ["services" "headscale" "settings" "derp" "urls"])
(mkRenamedOptionModule ["services" "headscale" "ephemeralNodeInactivityTimeout"] ["services" "headscale" "settings" "ephemeral_node_inactivity_timeout"])
(mkRenamedOptionModule ["services" "headscale" "database" "type"] ["services" "headscale" "settings" "db_type"])
(mkRenamedOptionModule ["services" "headscale" "database" "path"] ["services" "headscale" "settings" "db_path"])
(mkRenamedOptionModule ["services" "headscale" "database" "host"] ["services" "headscale" "settings" "db_host"])
(mkRenamedOptionModule ["services" "headscale" "database" "port"] ["services" "headscale" "settings" "db_port"])
(mkRenamedOptionModule ["services" "headscale" "database" "name"] ["services" "headscale" "settings" "db_name"])
(mkRenamedOptionModule ["services" "headscale" "database" "user"] ["services" "headscale" "settings" "db_user"])
(mkRenamedOptionModule ["services" "headscale" "database" "passwordFile"] ["services" "headscale" "settings" "db_password_file"])
(mkRenamedOptionModule ["services" "headscale" "logLevel"] ["services" "headscale" "settings" "log" "level"])
(mkRenamedOptionModule ["services" "headscale" "dns" "nameservers"] ["services" "headscale" "settings" "dns_config" "nameservers"])
(mkRenamedOptionModule ["services" "headscale" "dns" "domains"] ["services" "headscale" "settings" "dns_config" "domains"])
(mkRenamedOptionModule ["services" "headscale" "dns" "magicDns"] ["services" "headscale" "settings" "dns_config" "magic_dns"])
(mkRenamedOptionModule ["services" "headscale" "dns" "baseDomain"] ["services" "headscale" "settings" "dns_config" "base_domain"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_path"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
(mkRenamedOptionModule ["services" "headscale" "serverUrl"] ["services" "headscale" "settings" "server_url"])
(mkRenamedOptionModule ["services" "headscale" "tls" "certFile"] ["services" "headscale" "settings" "tls_cert_path"])
(mkRenamedOptionModule ["services" "headscale" "tls" "keyFile"] ["services" "headscale" "settings" "tls_key_path"])
(mkRenamedOptionModule ["services" "headscale" "aclPolicyFile"] ["services" "headscale" "settings" "acl_policy_path"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
(mkRemovedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ''
Headscale no longer uses domain_map. If you're using an old version of headscale you can still set this option via services.headscale.settings.oidc.domain_map.
'')
];
config = mkIf cfg.enable {
services.headscale.settings = {
listen_addr = mkDefault "${cfg.address}:${toString cfg.port}";
config = lib.mkIf cfg.enable {
services.headscale.settings = lib.mkMerge [
cliConfig
{
listen_addr = lib.mkDefault "${cfg.address}:${toString cfg.port}";
# Turn off update checks since the origin of our package
# is nixpkgs and not Github.
disable_check_updates = true;
unix_socket = "${runDir}/headscale.sock";
tls_letsencrypt_cache_dir = "${dataDir}/.cache";
};
tls_letsencrypt_cache_dir = "${dataDir}/.cache";
}
];
environment = {
# Setup the headscale configuration in a known path in /etc to
# allow both the Server and the Client use it to find the socket
# for communication.
etc."headscale/config.yaml".source = configFile;
# Headscale CLI needs a minimal config to be able to locate the unix socket
# to talk to the server instance.
etc."headscale/config.yaml".source = cliConfigFile;
systemPackages = [ cfg.package ];
systemPackages = [cfg.package];
};
users.groups.headscale = mkIf (cfg.group == "headscale") {};
users.groups.headscale = lib.mkIf (cfg.group == "headscale") {};
users.users.headscale = mkIf (cfg.user == "headscale") {
users.users.headscale = lib.mkIf (cfg.user == "headscale") {
description = "headscale user";
home = dataDir;
group = cfg.group;
@ -464,23 +528,20 @@ in {
systemd.services.headscale = {
description = "headscale coordination server for Tailscale";
wants = [ "network-online.target" ];
wants = ["network-online.target"];
after = ["network-online.target"];
wantedBy = ["multi-user.target"];
restartTriggers = [configFile];
environment.GIN_MODE = "release";
script = ''
${optionalString (cfg.settings.db_password_file != null) ''
export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.settings.db_password_file})"
${lib.optionalString (cfg.settings.database.postgres.password_file != null) ''
export HEADSCALE_DATABASE_POSTGRES_PASS="$(head -n1 ${lib.escapeShellArg cfg.settings.database.postgres.password_file})"
''}
exec ${cfg.package}/bin/headscale serve
exec ${lib.getExe cfg.package} serve --config ${configFile}
'';
serviceConfig = let
capabilityBoundingSet = ["CAP_CHOWN"] ++ optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
capabilityBoundingSet = ["CAP_CHOWN"] ++ lib.optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
in {
Restart = "always";
Type = "simple";
@ -525,5 +586,5 @@ in {
};
};
meta.maintainers = with maintainers; [kradalby misterio77];
meta.maintainers = with lib.maintainers; [kradalby misterio77];
}
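Putting the options above together, a minimal headscale deployment might look roughly like the following sketch; the URL and base domain are placeholders:

services.headscale = {
  enable = true;
  address = "0.0.0.0";
  port = 8080;
  settings = {
    server_url = "https://headscale.example.com";
    dns.base_domain = "ts.example.com";
  };
};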

View file

@ -0,0 +1,132 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.tailscale.derper;
in
{
meta.maintainers = with lib.maintainers; [ SuperSandro2000 ];
options = {
services.tailscale.derper = {
enable = lib.mkEnableOption "Tailscale Derper. See upstream doc <https://tailscale.com/kb/1118/custom-derp-servers> how to configure it on clients";
domain = lib.mkOption {
type = lib.types.str;
description = "Domain name under which the derper server is reachable.";
};
openFirewall = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to open the firewall for the specified port.
Derper requires the ports it uses to be open, otherwise it does not work as expected.
'';
};
package = lib.mkPackageOption pkgs [
"tailscale"
"derper"
] { };
stunPort = lib.mkOption {
type = lib.types.port;
default = 3478;
description = ''
STUN port to listen on.
See online docs <https://tailscale.com/kb/1118/custom-derp-servers#prerequisites> on how to configure a different external port.
'';
};
port = lib.mkOption {
type = lib.types.port;
default = 8010;
description = "The port the derper process will listen on. This is not the port tailscale will connect to.";
};
verifyClients = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether to check with a locally running tailscale daemon that connecting clients are allowed to connect to this node.
'';
};
};
};
config = lib.mkIf cfg.enable {
networking.firewall = lib.mkIf cfg.openFirewall {
# port 80 and 443 are opened by nginx already
allowedUDPPorts = [ cfg.stunPort ];
};
services = {
nginx = {
enable = true;
upstreams.tailscale-derper = {
servers."127.0.0.1:${toString cfg.port}" = { };
extraConfig = ''
keepalive 64;
'';
};
virtualHosts."${cfg.domain}" = {
addSSL = true; # this cannot be forceSSL as derper sends some information over port 80, too.
locations."/" = {
proxyPass = "http://tailscale-derper";
proxyWebsockets = true;
extraConfig = ''
keepalive_timeout 0;
proxy_buffering off;
'';
};
};
};
tailscale.enable = lib.mkIf cfg.verifyClients true;
};
systemd.services.tailscale-derper = {
serviceConfig = {
ExecStart =
"${lib.getExe' cfg.package "derper"} -a :${toString cfg.port} -c /var/lib/derper/derper.key -hostname=${cfg.domain} -stun-port ${toString cfg.stunPort}"
+ lib.optionalString cfg.verifyClients " -verify-clients";
DynamicUser = true;
Restart = "always";
RestartSec = "5sec"; # don't crash loop immediately
StateDirectory = "derper";
Type = "simple";
CapabilityBoundingSet = [ "" ];
DeviceAllow = null;
LockPersonality = true;
NoNewPrivileges = true;
MemoryDenyWriteExecute = true;
PrivateDevices = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
];
RestrictNamespaces = true;
RestrictRealtime = true;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" ];
};
wantedBy = [ "multi-user.target" ];
};
};
}
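A minimal sketch of enabling the new derper module; the domain is a placeholder:

services.tailscale.derper = {
  enable = true;
  domain = "derp.example.com";
  openFirewall = true;   # opens the STUN port; 80/443 are handled by nginx
  verifyClients = true;  # the module then enables the local tailscaled, as shown above
};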

View file

@ -2,8 +2,8 @@
let
cfg = config.services.unifi;
stateDir = "/var/lib/unifi";
cmd = lib.escapeShellArgs ([ "@${cfg.jrePackage}/bin/java" "java" ]
++ lib.optionals (lib.versionAtLeast (lib.getVersion cfg.jrePackage) "16") [
cmd = lib.escapeShellArgs ([
"@${cfg.jrePackage}/bin/java" "java"
"--add-opens=java.base/java.lang=ALL-UNNAMED"
"--add-opens=java.base/java.time=ALL-UNNAMED"
"--add-opens=java.base/sun.security.util=ALL-UNNAMED"
@ -27,24 +27,19 @@ in
'';
};
services.unifi.jrePackage = lib.mkOption {
type = lib.types.package;
default = if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3") then pkgs.jdk11 else pkgs.jre8;
defaultText = lib.literalExpression ''if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3" then pkgs.jdk11 else pkgs.jre8'';
description = ''
The JRE package to use. Check the release notes to ensure it is supported.
services.unifi.jrePackage = lib.mkPackageOption pkgs "jdk" {
default = "jdk17_headless";
extraDescription = ''
Check the UniFi controller release notes to ensure it is supported.
'';
};
services.unifi.unifiPackage = lib.mkPackageOption pkgs "unifi5" { };
services.unifi.unifiPackage = lib.mkPackageOption pkgs "unifi" {
default = "unifi8";
};
services.unifi.mongodbPackage = lib.mkPackageOption pkgs "mongodb" {
default = "mongodb-5_0";
extraDescription = ''
::: {.note}
unifi7 officially only supports mongodb up until 4.4 but works with 5.0.
:::
'';
default = "mongodb-7_0";
};
services.unifi.openFirewall = lib.mkOption {
@ -92,6 +87,29 @@ in
config = lib.mkIf cfg.enable {
assertions = [
{
assertion = lib.versionAtLeast config.system.stateVersion "24.11"
|| (
options.services.unifi.unifiPackage.highestPrio < (lib.mkOptionDefault { }).priority
&& options.services.unifi.mongodbPackage.highestPrio < (lib.mkOptionDefault { }).priority
);
message = ''
Support for UniFi < 8 has been dropped; please explicitly set
`services.unifi.unifiPackage` and `services.unifi.mongodbPackage`.
Note that the previous default MongoDB version was 5.0 and MongoDB
only supports migrating one major version at a time; therefore, you
may wish to set `services.unifi.mongodbPackage = pkgs.mongodb-6_0;`
and activate your configuration before upgrading again to the default
`mongodb-7_0` supported by `unifi8`.
For more information, see the MongoDB upgrade notes:
<https://www.mongodb.com/docs/manual/release-notes/7.0-upgrade-standalone/#upgrade-recommendations-and-checklists>
'';
}
];
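Following the assertion message above, a sketch of the intermediate migration step for systems coming from the old mongodb-5_0 default:

services.unifi = {
  enable = true;
  unifiPackage = pkgs.unifi8;
  # Migrate MongoDB one major version at a time: 5.0 -> 6.0 first,
  # then switch to the default mongodb-7_0 and activate again.
  mongodbPackage = pkgs.mongodb-6_0;
};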
users.users.unifi = {
isSystemUser = true;
group = "unifi";

View file

@ -184,8 +184,8 @@ in
type = types.bool;
default = false;
description = ''
Whether to open the firewall for TCP/UDP ports specified in
listenAdrresses option.
Whether to open the firewall for TCP ports specified in
listenAddresses option.
'';
};
@ -493,7 +493,6 @@ in
listenPorts = parsePorts cfg.listenAddresses;
in mkIf cfg.openFirewall {
allowedTCPPorts = listenPorts;
allowedUDPPorts = listenPorts;
};
};

View file

@ -1,252 +0,0 @@
{ config, lib, options, pkgs, utils, ... }:
with lib;
let
cfg = config.services.unifi-video;
opt = options.services.unifi-video;
mainClass = "com.ubnt.airvision.Main";
cmd = ''
${pkgs.jsvc}/bin/jsvc \
-cwd ${stateDir} \
-debug \
-verbose:class \
-nodetach \
-user unifi-video \
-home ${cfg.jrePackage}/lib/openjdk \
-cp ${pkgs.commonsDaemon}/share/java/commons-daemon-1.2.4.jar:${stateDir}/lib/airvision.jar \
-pidfile ${cfg.pidFile} \
-procname unifi-video \
-Djava.security.egd=file:/dev/./urandom \
-Xmx${toString cfg.maximumJavaHeapSize}M \
-Xss512K \
-XX:+UseG1GC \
-XX:+UseStringDeduplication \
-XX:MaxMetaspaceSize=768M \
-Djava.library.path=${stateDir}/lib \
-Djava.awt.headless=true \
-Djavax.net.ssl.trustStore=${stateDir}/etc/ufv-truststore \
-Dfile.encoding=UTF-8 \
-Dav.tempdir=/var/cache/unifi-video
'';
mongoConf = pkgs.writeTextFile {
name = "mongo.conf";
executable = false;
text = ''
# for documentation of all options, see https://www.mongodb.com/docs/manual/reference/configuration-options/
storage:
dbPath: ${cfg.dataDir}/db
journal:
enabled: true
syncPeriodSecs: 60
systemLog:
destination: file
logAppend: true
path: ${stateDir}/logs/mongod.log
net:
port: 7441
bindIp: 127.0.0.1
http:
enabled: false
operationProfiling:
slowOpThresholdMs: 500
mode: off
'';
};
mongoWtConf = pkgs.writeTextFile {
name = "mongowt.conf";
executable = false;
text = ''
# for documentation of all options, see:
# https://www.mongodb.com/docs/manual/reference/configuration-options/
storage:
dbPath: ${cfg.dataDir}/db-wt
journal:
enabled: true
wiredTiger:
engineConfig:
cacheSizeGB: 1
systemLog:
destination: file
logAppend: true
path: logs/mongod.log
net:
port: 7441
bindIp: 127.0.0.1
operationProfiling:
slowOpThresholdMs: 500
mode: off
'';
};
stateDir = "/var/lib/unifi-video";
in
{
options.services.unifi-video = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether or not to enable the unifi-video service.
'';
};
jrePackage = mkPackageOption pkgs "jre8" { };
unifiVideoPackage = mkPackageOption pkgs "unifi-video" { };
mongodbPackage = mkPackageOption pkgs "mongodb" {
default = "mongodb-5_0";
};
logDir = mkOption {
type = types.str;
default = "${stateDir}/logs";
description = ''
Where to store the logs.
'';
};
dataDir = mkOption {
type = types.str;
default = "${stateDir}/data";
description = ''
Where to store the database and other data.
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Whether or not to open the required ports on the firewall.
'';
};
maximumJavaHeapSize = mkOption {
type = types.nullOr types.int;
default = 1024;
example = 4096;
description = ''
Set the maximum heap size for the JVM in MB.
'';
};
pidFile = mkOption {
type = types.path;
default = "${cfg.dataDir}/unifi-video.pid";
defaultText = literalExpression ''"''${config.${opt.dataDir}}/unifi-video.pid"'';
description = "Location of unifi-video pid file.";
};
};
config = mkIf cfg.enable {
warnings = optional
(options.services.unifi-video.openFirewall.highestPrio >= (mkOptionDefault null).priority)
"The current services.unifi-video.openFirewall = true default is deprecated and will change to false in 22.11. Set it explicitly to silence this warning.";
users.users.unifi-video = {
description = "UniFi Video controller daemon user";
home = stateDir;
group = "unifi-video";
isSystemUser = true;
};
users.groups.unifi-video = {};
networking.firewall = mkIf cfg.openFirewall {
# https://help.ui.com/hc/en-us/articles/217875218-UniFi-Video-Ports-Used
allowedTCPPorts = [
7080 # HTTP portal
7443 # HTTPS portal
7445 # Video over HTTP (mobile app)
7446 # Video over HTTPS (mobile app)
7447 # RTSP via the controller
7442 # Camera management from cameras to NVR over WAN
];
allowedUDPPorts = [
6666 # Inbound camera streams sent over WAN
];
};
systemd.tmpfiles.rules = [
"d '${stateDir}' 0700 unifi-video unifi-video - -"
"d '/var/cache/unifi-video' 0700 unifi-video unifi-video - -"
"d '${stateDir}/logs' 0700 unifi-video unifi-video - -"
"C '${stateDir}/etc' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/etc"
"C '${stateDir}/webapps' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/webapps"
"C '${stateDir}/email' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/email"
"C '${stateDir}/fw' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/fw"
"C '${stateDir}/lib' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/lib"
"d '${stateDir}/data' 0700 unifi-video unifi-video - -"
"d '${stateDir}/data/db' 0700 unifi-video unifi-video - -"
"C '${stateDir}/data/system.properties' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/etc/system.properties"
"d '${stateDir}/bin' 0700 unifi-video unifi-video - -"
"f '${stateDir}/bin/evostreamms' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/evostreamms"
"f '${stateDir}/bin/libavcodec.so.54' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/libavcodec.so.54"
"f '${stateDir}/bin/libavformat.so.54' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/libavformat.so.54"
"f '${stateDir}/bin/libavutil.so.52' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/libavutil.so.52"
"f '${stateDir}/bin/ubnt.avtool' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/ubnt.avtool"
"f '${stateDir}/bin/ubnt.updater' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/ubnt.updater"
"C '${stateDir}/bin/mongo' 0700 unifi-video unifi-video - ${cfg.mongodbPackage}/bin/mongo"
"C '${stateDir}/bin/mongod' 0700 unifi-video unifi-video - ${cfg.mongodbPackage}/bin/mongod"
"C '${stateDir}/bin/mongoperf' 0700 unifi-video unifi-video - ${cfg.mongodbPackage}/bin/mongoperf"
"C '${stateDir}/bin/mongos' 0700 unifi-video unifi-video - ${cfg.mongodbPackage}/bin/mongos"
"d '${stateDir}/conf' 0700 unifi-video unifi-video - -"
"C '${stateDir}/conf/evostream' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/evostream"
"Z '${stateDir}/conf/evostream' 0700 unifi-video unifi-video - -"
"L+ '${stateDir}/conf/mongodv3.0+.conf' 0700 unifi-video unifi-video - ${mongoConf}"
"L+ '${stateDir}/conf/mongodv3.6+.conf' 0700 unifi-video unifi-video - ${mongoConf}"
"L+ '${stateDir}/conf/mongod-wt.conf' 0700 unifi-video unifi-video - ${mongoWtConf}"
"L+ '${stateDir}/conf/catalina.policy' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/catalina.policy"
"L+ '${stateDir}/conf/catalina.properties' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/catalina.properties"
"L+ '${stateDir}/conf/context.xml' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/context.xml"
"L+ '${stateDir}/conf/logging.properties' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/logging.properties"
"L+ '${stateDir}/conf/server.xml' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/server.xml"
"L+ '${stateDir}/conf/tomcat-users.xml' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/tomcat-users.xml"
"L+ '${stateDir}/conf/web.xml' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/web.xml"
];
systemd.services.unifi-video = {
description = "UniFi Video NVR daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ] ;
unitConfig.RequiresMountsFor = stateDir;
# Make sure package upgrades trigger a service restart
restartTriggers = [ cfg.unifiVideoPackage cfg.mongodbPackage ];
path = with pkgs; [ gawk coreutils busybox which jre8 lsb-release libcap util-linux ];
serviceConfig = {
Type = "simple";
ExecStart = "${(removeSuffix "\n" cmd)} ${mainClass} start";
ExecStop = "${(removeSuffix "\n" cmd)} stop ${mainClass} stop";
Restart = "on-failure";
UMask = "0077";
User = "unifi-video";
WorkingDirectory = "${stateDir}";
};
};
};
imports = [
(mkRenamedOptionModule [ "services" "unifi-video" "openPorts" ] [ "services" "unifi-video" "openFirewall" ])
];
meta.maintainers = with lib.maintainers; [ rsynnest ];
}

View file

@ -10,7 +10,7 @@ in
options.services.node-red = {
enable = mkEnableOption "the Node-RED service";
package = mkPackageOption pkgs [ "nodePackages" "node-red" ] { };
package = mkPackageOption pkgs [ "node-red" ] { };
openFirewall = mkOption {
type = types.bool;
@ -31,8 +31,8 @@ in
configFile = mkOption {
type = types.path;
default = "${cfg.package}/lib/node_modules/node-red/settings.js";
defaultText = literalExpression ''"''${package}/lib/node_modules/node-red/settings.js"'';
default = "${cfg.package}/lib/node_modules/node-red/packages/node_modules/node-red/settings.js";
defaultText = literalExpression ''"''${package}/lib/node_modules/node-red/packages/node_modules/node-red/settings.js"'';
description = ''
Path to the JavaScript configuration file.
See <https://github.com/node-red/node-red/blob/master/packages/node_modules/node-red/settings.js>
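A brief sketch of pointing the service at a custom settings file; the local file name is hypothetical:

services.node-red = {
  enable = true;
  openFirewall = true;
  configFile = ./node-red-settings.js; # hypothetical local settings.js
};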

View file

@ -5,7 +5,6 @@ let
poolName = "rss-bridge";
configAttr = lib.recursiveUpdate { FileCache.path = "${cfg.dataDir}/cache/"; } cfg.config;
cfgHalf = lib.mapAttrsRecursive (path: value: let
envName = lib.toUpper ("RSSBRIDGE_" + lib.concatStringsSep "_" path);
envValue = if lib.isList value then
@ -14,7 +13,7 @@ let
lib.boolToString value
else
toString value;
in "fastcgi_param \"${envName}\" \"${envValue}\";") configAttr;
in if (value != null) then "fastcgi_param \"${envName}\" \"${envValue}\";" else null) cfg.config;
cfgEnv = lib.concatStringsSep "\n" (lib.collect lib.isString cfgHalf);
in
{
@ -70,9 +69,26 @@ in
};
config = mkOption {
type = with types; attrsOf (attrsOf (oneOf [ bool int str (listOf str) ]));
default = {};
defaultText = options.literalExpression "FileCache.path = \"\${config.services.rss-bridge.dataDir}/cache/\"";
type = types.submodule {
freeformType = (pkgs.formats.ini {}).type;
options = {
system = {
enabled_bridges = mkOption {
type = with types; nullOr (either str (listOf str));
description = "Only enabled bridges are available for feed production";
default = null;
};
};
FileCache = {
path = mkOption {
type = types.str;
description = "Directory where to store cache files (if cache.type = \"file\").";
default = "${cfg.dataDir}/cache/";
defaultText = options.literalExpression "\${config.services.rss-bridge.dataDir}/cache/";
};
};
};
};
example = options.literalExpression ''
{
system.enabled_bridges = [ "*" ];
@ -112,15 +128,13 @@ in
};
};
};
systemd.tmpfiles.settings.rss-bridge = let
perm = {
mode = "0750";
user = cfg.user;
group = cfg.group;
};
in {
"${configAttr.FileCache.path}".d = perm;
"${cfg.dataDir}/config.ini.php".z = perm;
systemd.tmpfiles.settings.rss-bridge = {
"${cfg.config.FileCache.path}".d = {
mode = "0750";
user = cfg.user;
group = cfg.group;
};
};
services.nginx = mkIf (cfg.virtualHost != null) {
@ -139,7 +153,6 @@ in
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:${config.services.phpfpm.pools.${cfg.pool}.socket};
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param RSSBRIDGE_DATA ${cfg.dataDir};
${cfgEnv}
'';
};
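To illustrate the mapping performed in `cfgHalf` above, a configuration such as the following sketch (the cache path is hypothetical):

services.rss-bridge.config = {
  system.enabled_bridges = "*";
  FileCache.path = "/var/cache/rss-bridge/";
};

would be passed to PHP-FPM roughly as `fastcgi_param "RSSBRIDGE_SYSTEM_ENABLED_BRIDGES" "*";` and `fastcgi_param "RSSBRIDGE_FILECACHE_PATH" "/var/cache/rss-bridge/";` (list values are serialised by the branch elided from this hunk).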

View file

@ -234,11 +234,12 @@ in
system.activationScripts.var = ""; # obsolete
systemd.tmpfiles.rules = [
"D /var/empty 0555 root root -"
"h /var/empty - - - - +i"
] ++ lib.optionals config.nix.enable [
# Prevent the current configuration from being garbage-collected.
"d /nix/var/nix/gcroots -"
"L+ /nix/var/nix/gcroots/current-system - - - - /run/current-system"
"D /var/empty 0555 root root -"
"h /var/empty - - - - +i"
];
system.activationScripts.usrbinenv = if config.environment.usrbinenv != null

View file

@ -197,6 +197,8 @@ in
package = mkPackageOption pkgs "systemd" {};
enableStrictShellChecks = mkEnableOption "running shellcheck on the generated scripts for systemd units.";
units = mkOption {
description = "Definition of systemd units; see {manpage}`systemd.unit(5)`.";
default = {};

View file

@ -281,15 +281,19 @@ in
) cfg.settings);
systemd.tmpfiles.rules = [
"d /nix/var 0755 root root - -"
"L+ /nix/var/nix/gcroots/booted-system 0755 root root - /run/booted-system"
"d /run/lock 0755 root root - -"
"d /var/db 0755 root root - -"
"L /var/lock - - - - ../run/lock"
# Boot-time cleanup
] ++ lib.optionals config.nix.enable [
"d /nix/var 0755 root root - -"
"L+ /nix/var/nix/gcroots/booted-system 0755 root root - /run/booted-system"
]
# Boot-time cleanup
++ [
"R! /etc/group.lock - - - - -"
"R! /etc/passwd.lock - - - - -"
"R! /etc/shadow.lock - - - - -"
] ++ lib.optionals config.nix.enable [
"R! /nix/var/nix/gcroots/tmp - - - - -"
"R! /nix/var/nix/temproots - - - - -"
];

View file

@ -1,27 +1,85 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.boot.initrd.unl0kr;
settingsFormat = pkgs.formats.ini { };
in
{
options.boot.initrd.unl0kr = {
enable = lib.mkEnableOption "unl0kr in initrd" // {
description = ''Whether to enable the unl0kr on-screen keyboard in initrd to unlock LUKS.'';
};
allowVendorDrivers = lib.mkEnableOption "load optional drivers" // {
description = ''Whether to load additional drivers for certain vendors (e.g. Wacom, Intel).'';
};
settings = lib.mkOption {
description = ''
Whether to enable the unl0kr on-screen keyboard in initrd to unlock LUKS.
Configuration for `unl0kr`.
See `unl0kr.conf(5)` for supported values.
Alternatively, visit `https://gitlab.com/postmarketOS/buffybox/-/blob/unl0kr-2.0.0/unl0kr.conf`
'';
example = lib.literalExpression ''
{
general.animations = true;
theme = {
default = "pmos-dark";
alternate = "pmos-light";
};
}
'';
default = { };
type = lib.types.submodule { freeformType = settingsFormat.type; };
};
};
config = lib.mkIf cfg.enable {
meta.maintainers = [];
meta.maintainers = with lib.maintainers; [ hustlerone ];
assertions = [
{
assertion = cfg.enable -> config.boot.initrd.systemd.enable;
message = "boot.initrd.unl0kr is only supported with boot.initrd.systemd.";
}
{
assertion = !config.boot.plymouth.enable;
message = "unl0kr will not work if plymouth is enabled.";
}
{
assertion = !config.hardware.amdgpu.initrd.enable;
message = "unl0kr has issues with video drivers that are loaded on stage 1.";
}
];
boot.initrd.availableKernelModules =
lib.optionals cfg.enable [
"hid-multitouch"
"hid-generic"
"usbhid"
"i2c-designware-core"
"i2c-designware-platform"
"i2c-hid-acpi"
"usbtouchscreen"
"evdev"
]
++ lib.optionals cfg.allowVendorDrivers [
"intel_lpss_pci"
"elo"
"wacom"
];
boot.initrd.systemd = {
contents."/etc/unl0kr.conf".source = settingsFormat.generate "unl0kr.conf" cfg.settings;
storePaths = with pkgs; [
"${pkgs.gnugrep}/bin/grep"
libinput
@ -42,9 +100,7 @@ in
"systemd-vconsole-setup.service"
"udev.service"
];
before = [
"shutdown.target"
];
before = [ "shutdown.target" ];
script = ''
# This script acts as a Password Agent: https://systemd.io/PASSWORD_AGENTS/
@ -56,7 +112,7 @@ in
do
for file in `ls $DIR/ask.*`; do
socket="$(cat "$file" | ${pkgs.gnugrep}/bin/grep "Socket=" | cut -d= -f2)"
${pkgs.unl0kr}/bin/unl0kr | ${config.boot.initrd.systemd.package}/lib/systemd/systemd-reply-password 1 "$socket"
${pkgs.unl0kr}/bin/unl0kr -v -C "/etc/unl0kr.conf" | ${config.boot.initrd.systemd.package}/lib/systemd/systemd-reply-password 1 "$socket"
done
done
'';
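A minimal sketch of enabling the module within the constraints asserted above; the setting shown is taken from the example:

boot.initrd.systemd.enable = true;  # required, see the assertion above
boot.initrd.unl0kr = {
  enable = true;
  settings.general.animations = true;
};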

View file

@ -33,7 +33,8 @@ with lib;
if ! [ -e /root/.ssh/authorized_keys ]; then
echo "obtaining SSH key..."
mkdir -m 0700 -p /root/.ssh
mkdir -p /root/.ssh
chmod 0700 /root/.ssh
if [ -s /etc/ec2-metadata/public-keys-0-openssh-key ]; then
(umask 177; cat /etc/ec2-metadata/public-keys-0-openssh-key >> /root/.ssh/authorized_keys)
echo "new key added to authorized_keys"
@ -45,19 +46,20 @@ with lib;
# generate one normally.
userData=/etc/ec2-metadata/user-data
mkdir -m 0755 -p /etc/ssh
mkdir -p /etc/ssh
chmod 0755 /etc/ssh
if [ -s "$userData" ]; then
key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' $userData)"
key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' $userData)"
if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
if [ -n "$key" ] && [ -n "$key_pub" ] && [ ! -e /etc/ssh/ssh_host_dsa_key ]; then
(umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
fi
key="$(sed 's/|/\n/g; s/SSH_HOST_ED25519_KEY://; t; d' $userData)"
key_pub="$(sed 's/SSH_HOST_ED25519_KEY_PUB://; t; d' $userData)"
if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_ed25519_key ]; then
if [ -n "$key" ] && [ -n "$key_pub" ] && [ ! -e /etc/ssh/ssh_host_ed25519_key ]; then
(umask 077; echo "$key" > /etc/ssh/ssh_host_ed25519_key)
echo "$key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
fi
@ -79,7 +81,7 @@ with lib;
# ec2-get-console-output.
echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----" > /dev/console
for i in /etc/ssh/ssh_host_*_key.pub; do
${config.programs.ssh.package}/bin/ssh-keygen -l -f $i || true > /dev/console
${config.programs.ssh.package}/bin/ssh-keygen -l -f "$i" || true > /dev/console
done
echo "-----END SSH HOST KEY FINGERPRINTS-----" > /dev/console
'';

View file

@ -1,5 +1,6 @@
metaDir=/etc/ec2-metadata
mkdir -m 0755 -p "$metaDir"
mkdir -p "$metaDir"
chown 0755 "$metaDir"
rm -f "$metaDir/*"
get_imds_token() {
@ -40,7 +41,7 @@ while [ $try -le 3 ]; do
sleep 1
done
if [ "x$IMDS_TOKEN" == "x" ]; then
if [ "$IMDS_TOKEN" == "" ]; then
echo "failed to fetch an IMDS2v token."
fi

View file

@ -301,6 +301,27 @@ in
'';
};
shutdownTimeout = mkOption {
type = types.ints.unsigned;
default = 300;
description = ''
Number of seconds we're willing to wait for a guest to shut down.
If parallel shutdown is enabled, this timeout applies to shutting down
all guests on a single URI defined in the variable URIS.
If this is 0, there is no timeout (use with caution, as guests might not
respond to a shutdown request).
'';
};
startDelay = mkOption {
type = types.ints.unsigned;
default = 0;
description = ''
Number of seconds to wait between each guest start.
If set to 0, all guests will start up in parallel.
'';
};
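For example, assuming these options land under `virtualisation.libvirtd` next to the existing `parallelShutdown` and `allowedBridges` options, a sketch with arbitrary values:

virtualisation.libvirtd = {
  enable = true;
  shutdownTimeout = 120;  # wait up to two minutes for guests to shut down
  startDelay = 5;         # stagger guest start-up by five seconds
};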
allowedBridges = mkOption {
type = types.listOf types.str;
default = [ "virbr0" ];
@ -495,6 +516,8 @@ in
environment.ON_BOOT = "${cfg.onBoot}";
environment.ON_SHUTDOWN = "${cfg.onShutdown}";
environment.PARALLEL_SHUTDOWN = "${toString cfg.parallelShutdown}";
environment.SHUTDOWN_TIMEOUT = "${toString cfg.shutdownTimeout}";
environment.START_DELAY = "${toString cfg.startDelay}";
};
systemd.sockets.virtlogd = {

View file

@ -8,6 +8,35 @@
}:
let
inherit (builtins) readFile;
inherit (lib.modules) mkRemovedOptionModule mkRenamedOptionModule mkIf;
inherit (lib.options)
mkOption
mkEnableOption
literalExpression
mkPackageOption
;
inherit (lib.types)
listOf
str
ints
lines
enum
path
submodule
addCheck
float
bool
int
nullOr
;
inherit (lib.lists) optional optionals;
inherit (lib.strings) hasSuffix optionalString;
inherit (lib.meta) getExe;
inherit (lib.attrsets) optionalAttrs;
inherit (lib.trivial) boolToString;
inherit (lib.teams.xen) members;
cfg = config.virtualisation.xen;
xenBootBuilder = pkgs.writeShellApplication {
@ -22,7 +51,7 @@ let
gnused
jq
])
++ lib.lists.optionals (cfg.efi.bootBuilderVerbosity == "info") (
++ optionals (cfg.efi.bootBuilderVerbosity == "info") (
with pkgs;
[
bat
@ -36,12 +65,12 @@ let
# We disable SC2016 because we don't want to expand the regexes in the sed commands.
excludeShellChecks = [ "SC2016" ];
text = builtins.readFile ./xen-boot-builder.sh;
text = readFile ./xen-boot-builder.sh;
};
in
{
imports = with lib.modules; [
imports = [
(mkRemovedOptionModule
[
"virtualisation"
@ -123,59 +152,33 @@ in
options.virtualisation.xen = {
enable = lib.options.mkEnableOption "the Xen Project Hypervisor, a virtualisation technology defined as a *type-1 hypervisor*, which allows multiple virtual machines, known as *domains*, to run concurrently on the physical machine. NixOS runs as the privileged *Domain 0*. This option requires a reboot into a Xen kernel to take effect";
enable = mkEnableOption "the Xen Project Hypervisor, a virtualisation technology defined as a *type-1 hypervisor*, which allows multiple virtual machines, known as *domains*, to run concurrently on the physical machine. NixOS runs as the privileged *Domain 0*. This option requires a reboot into a Xen kernel to take effect";
debug = lib.options.mkEnableOption "Xen debug features for Domain 0. This option enables some hidden debugging tests and features, and should not be used in production";
debug = mkEnableOption "Xen debug features for Domain 0. This option enables some hidden debugging tests and features, and should not be used in production";
trace = lib.options.mkOption {
type = lib.types.bool;
trace = mkOption {
type = bool;
default = cfg.debug;
defaultText = lib.options.literalExpression "false";
defaultText = literalExpression "false";
example = true;
description = "Whether to enable Xen debug tracing and logging for Domain 0.";
};
package = lib.options.mkOption {
type = lib.types.package;
default = pkgs.xen;
defaultText = lib.options.literalExpression "pkgs.xen";
example = lib.options.literalExpression "pkgs.xen-slim";
description = ''
The package used for Xen Project Hypervisor.
'';
relatedPackages = [
"xen"
"xen-slim"
];
};
package = mkPackageOption pkgs "Xen Hypervisor" { default = [ "xen" ]; };
qemu = {
package = lib.options.mkOption {
type = lib.types.package;
default = pkgs.xen;
defaultText = lib.options.literalExpression "pkgs.xen";
example = lib.options.literalExpression "pkgs.qemu_xen";
description = ''
The package with QEMU binaries that runs in Domain 0
and virtualises the unprivileged domains.
'';
relatedPackages = [
"xen"
{
name = "qemu_xen";
comment = "For use with `pkgs.xen-slim`.";
}
];
package = mkPackageOption pkgs "QEMU (with Xen Hypervisor support)" {
default = [ "qemu_xen" ];
};
pidFile = lib.options.mkOption {
type = lib.types.path;
pidFile = mkOption {
type = path;
default = "/run/xen/qemu-dom0.pid";
example = "/var/run/xen/qemu-dom0.pid";
description = "Path to the QEMU PID file.";
};
};
bootParams = lib.options.mkOption {
bootParams = mkOption {
default = [ ];
example = ''
[
@ -184,7 +187,7 @@ in
"vga=ask"
]
'';
type = lib.types.listOf lib.types.str;
type = listOf str;
description = ''
Xen Command Line parameters passed to Domain 0 at boot time.
Note: these are different from `boot.kernelParams`. See
@ -193,8 +196,8 @@ in
};
efi = {
bootBuilderVerbosity = lib.options.mkOption {
type = lib.types.enum [
bootBuilderVerbosity = mkOption {
type = enum [
"default"
"info"
"debug"
@ -218,11 +221,11 @@ in
'';
};
path = lib.options.mkOption {
type = lib.types.path;
path = mkOption {
type = path;
default = "${cfg.package.boot}/${cfg.package.efi}";
defaultText = lib.options.literalExpression "\${config.virtualisation.xen.package.boot}/\${config.virtualisation.xen.package.efi}";
example = lib.options.literalExpression "\${config.virtualisation.xen.package}/boot/efi/efi/nixos/xen-\${config.virtualisation.xen.package.version}.efi";
defaultText = literalExpression "\${config.virtualisation.xen.package.boot}/\${config.virtualisation.xen.package.efi}";
example = literalExpression "\${config.virtualisation.xen.package}/boot/efi/efi/nixos/xen-\${config.virtualisation.xen.package.version}.efi";
description = ''
Path to xen.efi. `pkgs.xen` is patched to install the xen.efi file
on `$boot/boot/xen.efi`, but an unpatched Xen build may install it
@ -234,10 +237,10 @@ in
};
dom0Resources = {
maxVCPUs = lib.options.mkOption {
maxVCPUs = mkOption {
default = 0;
example = 4;
type = lib.types.ints.unsigned;
type = ints.unsigned;
description = ''
Amount of virtual CPU cores allocated to Domain 0 on boot.
If set to 0, all cores are assigned to Domain 0, and
@ -245,10 +248,10 @@ in
'';
};
memory = lib.options.mkOption {
memory = mkOption {
default = 0;
example = 512;
type = lib.types.ints.unsigned;
type = ints.unsigned;
description = ''
Amount of memory (in MiB) allocated to Domain 0 on boot.
If set to 0, all memory is assigned to Domain 0, and
@ -256,11 +259,11 @@ in
'';
};
maxMemory = lib.options.mkOption {
maxMemory = mkOption {
default = cfg.dom0Resources.memory;
defaultText = lib.options.literalExpression "config.virtualisation.xen.dom0Resources.memory";
defaultText = literalExpression "config.virtualisation.xen.dom0Resources.memory";
example = 1024;
type = lib.types.ints.unsigned;
type = ints.unsigned;
description = ''
Maximum amount of memory (in MiB) that Domain 0 can
dynamically allocate to itself. Does nothing if set
@ -271,8 +274,8 @@ in
};
domains = {
extraConfig = lib.options.mkOption {
type = lib.types.lines;
extraConfig = mkOption {
type = lines;
default = "";
example = ''
XENDOMAINS_SAVE=/persist/xen/save
@ -288,28 +291,28 @@ in
};
store = {
path = lib.options.mkOption {
type = lib.types.path;
path = mkOption {
type = path;
default = "${cfg.package}/bin/oxenstored";
defaultText = lib.options.literalExpression "\${config.virtualisation.xen.package}/bin/oxenstored";
example = lib.options.literalExpression "\${config.virtualisation.xen.package}/bin/xenstored";
defaultText = literalExpression "\${config.virtualisation.xen.package}/bin/oxenstored";
example = literalExpression "\${config.virtualisation.xen.package}/bin/xenstored";
description = ''
Path to the Xen Store Daemon. This option is useful to
switch between the legacy C-based Xen Store Daemon, and
the newer OCaml-based Xen Store Daemon, `oxenstored`.
'';
};
type = lib.options.mkOption {
type = lib.types.enum [
type = mkOption {
type = enum [
"c"
"ocaml"
];
default = if (lib.strings.hasSuffix "oxenstored" cfg.store.path) then "ocaml" else "c";
default = if (hasSuffix "oxenstored" cfg.store.path) then "ocaml" else "c";
internal = true;
readOnly = true;
description = "Helper internal option that determines the type of the Xen Store Daemon based on cfg.store.path.";
};
settings = lib.options.mkOption {
settings = mkOption {
default = { };
example = {
enableMerge = false;
@ -324,34 +327,34 @@ in
The OCaml-based Xen Store Daemon configuration. This
option does nothing with the C-based `xenstored`.
'';
type = lib.types.submodule {
type = submodule {
options = {
pidFile = lib.options.mkOption {
pidFile = mkOption {
default = "/run/xen/xenstored.pid";
example = "/var/run/xen/xenstored.pid";
type = lib.types.path;
type = path;
description = "Path to the Xen Store Daemon PID file.";
};
testEAGAIN = lib.options.mkOption {
testEAGAIN = mkOption {
default = cfg.debug;
defaultText = lib.options.literalExpression "config.virtualisation.xen.debug";
defaultText = literalExpression "config.virtualisation.xen.debug";
example = true;
type = lib.types.bool;
type = bool;
visible = false;
description = "Randomly fail a transaction with EAGAIN. This option is used for debugging purposes only.";
};
enableMerge = lib.options.mkOption {
enableMerge = mkOption {
default = true;
example = false;
type = lib.types.bool;
type = bool;
description = "Whether to enable transaction merge support.";
};
conflict = {
burstLimit = lib.options.mkOption {
burstLimit = mkOption {
default = 5.0;
example = 15.0;
type = lib.types.addCheck (
lib.types.float
type = addCheck (
float
// {
name = "nonnegativeFloat";
description = "nonnegative floating point number, meaning >=0";
@ -369,12 +372,12 @@ in
domain's requests are ignored.
'';
};
maxHistorySeconds = lib.options.mkOption {
maxHistorySeconds = mkOption {
default = 5.0e-2;
example = 1.0;
type = lib.types.addCheck (
lib.types.float // { description = "nonnegative floating point number, meaning >=0"; }
) (n: n >= 0);
type = addCheck (float // { description = "nonnegative floating point number, meaning >=0"; }) (
n: n >= 0
);
description = ''
Limits applied to domains whose writes cause other domains' transaction
commits to fail. Must include decimal point.
@ -384,10 +387,10 @@ in
is the minimum pause-time during which a domain will be ignored.
'';
};
rateLimitIsAggregate = lib.options.mkOption {
rateLimitIsAggregate = mkOption {
default = true;
example = false;
type = lib.types.bool;
type = bool;
description = ''
If the conflict.rateLimitIsAggregate option is `true`, then after each
tick one point of conflict-credit is given to just one domain: the
@ -408,16 +411,16 @@ in
};
};
perms = {
enable = lib.options.mkOption {
enable = mkOption {
default = true;
example = false;
type = lib.types.bool;
type = bool;
description = "Whether to enable the node permission system.";
};
enableWatch = lib.options.mkOption {
enableWatch = mkOption {
default = true;
example = false;
type = lib.types.bool;
type = bool;
description = ''
Whether to enable the watch permission system.
@ -432,144 +435,142 @@ in
};
};
quota = {
enable = lib.options.mkOption {
enable = mkOption {
default = true;
example = false;
type = lib.types.bool;
type = bool;
description = "Whether to enable the quota system.";
};
maxEntity = lib.options.mkOption {
maxEntity = mkOption {
default = 1000;
example = 1024;
type = lib.types.ints.positive;
type = ints.positive;
description = "Entity limit for transactions.";
};
maxSize = lib.options.mkOption {
maxSize = mkOption {
default = 2048;
example = 4096;
type = lib.types.ints.positive;
type = ints.positive;
description = "Size limit for transactions.";
};
maxWatch = lib.options.mkOption {
maxWatch = mkOption {
default = 100;
example = 256;
type = lib.types.ints.positive;
type = ints.positive;
description = "Maximum number of watches by the Xenstore Watchdog.";
};
transaction = lib.options.mkOption {
transaction = mkOption {
default = 10;
example = 50;
type = lib.types.ints.positive;
type = ints.positive;
description = "Maximum number of transactions.";
};
maxRequests = lib.options.mkOption {
maxRequests = mkOption {
default = 1024;
example = 1024;
type = lib.types.ints.positive;
type = ints.positive;
description = "Maximum number of requests per transaction.";
};
maxPath = lib.options.mkOption {
maxPath = mkOption {
default = 1024;
example = 1024;
type = lib.types.ints.positive;
type = ints.positive;
description = "Path limit for the quota system.";
};
maxOutstanding = lib.options.mkOption {
maxOutstanding = mkOption {
default = 1024;
example = 1024;
type = lib.types.ints.positive;
type = ints.positive;
description = "Maximum outstanding requests, i.e. in-flight requests / domain.";
};
maxWatchEvents = lib.options.mkOption {
maxWatchEvents = mkOption {
default = 1024;
example = 2048;
type = lib.types.ints.positive;
type = ints.positive;
description = "Maximum number of outstanding watch events per watch.";
};
};
persistent = lib.options.mkOption {
persistent = mkOption {
default = false;
example = true;
type = lib.types.bool;
type = bool;
description = "Whether to activate the filed base backend.";
};
xenstored = {
log = {
file = lib.options.mkOption {
file = mkOption {
default = "/var/log/xen/xenstored.log";
example = "/dev/null";
type = lib.types.path;
type = path;
description = "Path to the Xen Store log file.";
};
level = lib.options.mkOption {
level = mkOption {
default = if cfg.trace then "debug" else null;
defaultText = lib.options.literalExpression "if (config.virtualisation.xen.trace == true) then \"debug\" else null";
defaultText = literalExpression "if (config.virtualisation.xen.trace == true) then \"debug\" else null";
example = "error";
type = lib.types.nullOr (
lib.types.enum [
"debug"
"info"
"warn"
"error"
]
);
type = nullOr (enum [
"debug"
"info"
"warn"
"error"
]);
description = "Logging level for the Xen Store.";
};
# The hidden options below have no upstream documentation whatsoever.
# The nb* options appear to alter the log rotation behaviour, and
# the specialOps option appears to affect the Xenbus logging logic.
nbFiles = lib.options.mkOption {
nbFiles = mkOption {
default = 10;
example = 16;
type = lib.types.int;
type = int;
visible = false;
description = "Set `xenstored-log-nb-files`.";
};
};
accessLog = {
file = lib.options.mkOption {
file = mkOption {
default = "/var/log/xen/xenstored-access.log";
example = "/var/log/security/xenstored-access.log";
type = lib.types.path;
type = path;
description = "Path to the Xen Store access log file.";
};
nbLines = lib.options.mkOption {
nbLines = mkOption {
default = 13215;
example = 16384;
type = lib.types.int;
type = int;
visible = false;
description = "Set `access-log-nb-lines`.";
};
nbChars = lib.options.mkOption {
nbChars = mkOption {
default = 180;
example = 256;
type = lib.types.int;
type = int;
visible = false;
description = "Set `acesss-log-nb-chars`.";
};
specialOps = lib.options.mkOption {
specialOps = mkOption {
default = false;
example = true;
type = lib.types.bool;
type = bool;
visible = false;
description = "Set `access-log-special-ops`.";
};
};
xenfs = {
kva = lib.options.mkOption {
kva = mkOption {
default = "/proc/xen/xsd_kva";
example = cfg.store.settings.xenstored.xenfs.kva;
type = lib.types.path;
type = path;
visible = false;
description = ''
Path to the Xen Store Daemon KVA location inside the XenFS pseudo-filesystem.
While it is possible to alter this value, some drivers may be hardcoded to follow the default paths.
'';
};
port = lib.options.mkOption {
port = mkOption {
default = "/proc/xen/xsd_port";
example = cfg.store.settings.xenstored.xenfs.port;
type = lib.types.path;
type = path;
visible = false;
description = ''
Path to the Xen Store Daemon userspace port inside the XenFS pseudo-filesystem.
@@ -578,11 +579,11 @@ in
};
};
};
ringScanInterval = lib.options.mkOption {
ringScanInterval = mkOption {
default = 20;
example = 30;
type = lib.types.addCheck (
lib.types.int
type = addCheck (
int
// {
name = "nonzeroInt";
description = "nonzero signed integer, meaning !=0";
@@ -602,7 +603,7 @@ in
## Implementation ##
config = lib.modules.mkIf cfg.enable {
config = mkIf cfg.enable {
assertions = [
{
assertion = pkgs.stdenv.hostPlatform.isx86_64;
@@ -639,18 +640,18 @@ in
];
virtualisation.xen.bootParams =
lib.lists.optionals cfg.trace [
optionals cfg.trace [
"loglvl=all"
"guest_loglvl=all"
]
++
lib.lists.optional (cfg.dom0Resources.memory != 0)
optional (cfg.dom0Resources.memory != 0)
"dom0_mem=${toString cfg.dom0Resources.memory}M${
lib.strings.optionalString (
optionalString (
cfg.dom0Resources.memory != cfg.dom0Resources.maxMemory
) ",max:${toString cfg.dom0Resources.maxMemory}M"
}"
++ lib.lists.optional (
++ optional (
cfg.dom0Resources.maxVCPUs != 0
) "dom0_max_vcpus=${toString cfg.dom0Resources.maxVCPUs}";
@@ -701,7 +702,7 @@ in
# See the `xenBootBuilder` script in the main `let...in` statement of this file.
loader.systemd-boot.extraInstallCommands = ''
${lib.meta.getExe xenBootBuilder} ${cfg.efi.bootBuilderVerbosity}
${getExe xenBootBuilder} ${cfg.efi.bootBuilderVerbosity}
'';
};
@@ -744,7 +745,7 @@ in
XENSTORED="${cfg.store.path}"
QEMU_XEN="${cfg.qemu.package}/${cfg.qemu.package.qemu-system-i386}"
${lib.strings.optionalString cfg.trace ''
${optionalString cfg.trace ''
XENSTORED_TRACE=yes
XENCONSOLED_TRACE=all
''}
@@ -756,10 +757,10 @@ in
'';
}
# The OCaml-based Xen Store Daemon requires /etc/xen/oxenstored.conf to start.
// lib.attrsets.optionalAttrs (cfg.store.type == "ocaml") {
// optionalAttrs (cfg.store.type == "ocaml") {
"xen/oxenstored.conf".text = ''
pid-file = ${cfg.store.settings.pidFile}
test-eagain = ${lib.trivial.boolToString cfg.store.settings.testEAGAIN}
test-eagain = ${boolToString cfg.store.settings.testEAGAIN}
merge-activate = ${toString cfg.store.settings.enableMerge}
conflict-burst-limit = ${toString cfg.store.settings.conflict.burstLimit}
conflict-max-history-seconds = ${toString cfg.store.settings.conflict.maxHistorySeconds}
@@ -775,7 +776,7 @@ in
quota-path-max = ${toString cfg.store.settings.quota.maxPath}
quota-maxoutstanding = ${toString cfg.store.settings.quota.maxOutstanding}
quota-maxwatchevents = ${toString cfg.store.settings.quota.maxWatchEvents}
persistent = ${lib.trivial.boolToString cfg.store.settings.persistent}
persistent = ${boolToString cfg.store.settings.persistent}
xenstored-log-file = ${cfg.store.settings.xenstored.log.file}
xenstored-log-level = ${
if isNull cfg.store.settings.xenstored.log.level then
@@ -787,7 +788,7 @@ in
access-log-file = ${cfg.store.settings.xenstored.accessLog.file}
access-log-nb-lines = ${toString cfg.store.settings.xenstored.accessLog.nbLines}
acesss-log-nb-chars = ${toString cfg.store.settings.xenstored.accessLog.nbChars}
access-log-special-ops = ${lib.trivial.boolToString cfg.store.settings.xenstored.accessLog.specialOps}
access-log-special-ops = ${boolToString cfg.store.settings.xenstored.accessLog.specialOps}
ring-scan-interval = ${toString cfg.store.settings.ringScanInterval}
xenstored-kva = ${cfg.store.settings.xenstored.xenfs.kva}
xenstored-port = ${cfg.store.settings.xenstored.xenfs.port}
@@ -870,5 +871,5 @@ in
};
};
};
meta.maintainers = lib.teams.xen.members;
meta.maintainers = members;
}
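For orientation, a minimal sketch of a host configuration exercising some of the options touched above; the option paths are inferred from this module's `cfg`/`config.virtualisation.xen` references, and the values are examples only, not recommendations:

# Illustrative only; not part of this diff.
{
  virtualisation.xen = {
    enable = true;
    trace = true; # also defaults xenstored.log.level to "debug", as seen above
    dom0Resources = {
      memory = 4096;
      maxMemory = 8192;
      maxVCPUs = 4;
    };
    store.settings = {
      quota.maxEntity = 2048;
      xenstored.log.level = "warn";
    };
  };
}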

View file

@@ -31,10 +31,11 @@
services.bind.forwarders = lib.mkForce [];
services.bind.zones = lib.singleton {
name = ".";
master = true;
file = let
addDot = zone: zone + lib.optionalString (!lib.hasSuffix "." zone) ".";
mkNsdZoneNames = zones: map addDot (lib.attrNames zones);
mkBindZoneNames = zones: map (zone: addDot zone.name) zones;
mkBindZoneNames = zones: map addDot (lib.attrNames zones);
getZones = cfg: mkNsdZoneNames cfg.services.nsd.zones
++ mkBindZoneNames cfg.services.bind.zones;
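As a quick check on the helper above, a standalone sketch of how `addDot` normalises zone names (the zone strings are invented for illustration):

# Evaluate with `nix-instantiate --eval --strict`; not part of the test.
let
  lib = import <nixpkgs/lib>;
  addDot = zone: zone + lib.optionalString (!lib.hasSuffix "." zone) ".";
in
map addDot [ "example.org" "example.org." "." ]
# prints [ "example.org." "example.org." "." ]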

View file

@@ -700,4 +700,92 @@ in
}
);
keymap =
let
pwInput = "qwerty";
pwOutput = "qwertz";
in
makeTest (
{ pkgs, lib, ... }:
{
name = "lomiri-keymap";
meta = {
maintainers = lib.teams.lomiri.members;
};
nodes.machine =
{ config, ... }:
{
imports = [ ./common/user-account.nix ];
virtualisation.memorySize = 2047;
users.users.${user} = {
inherit description;
password = lib.mkForce pwOutput;
};
services.desktopManager.lomiri.enable = lib.mkForce true;
services.displayManager.defaultSession = lib.mkForce "lomiri";
# Help with OCR
fonts.packages = [ pkgs.inconsolata ];
# Non-QWERTY keymap to test keymap patch
services.xserver.xkb.layout = "de";
};
enableOCR = true;
testScript =
{ nodes, ... }:
''
def wait_for_text(text):
"""
Wait for on-screen text, and try to optimise retry count for slow hardware.
"""
machine.sleep(10)
machine.wait_for_text(text)
start_all()
machine.wait_for_unit("multi-user.target")
# Lomiri in greeter mode should use the correct keymap
with subtest("lomiri greeter keymap works"):
machine.wait_for_unit("display-manager.service")
machine.wait_until_succeeds("pgrep -u lightdm -f 'lomiri --mode=greeter'")
# Start page shows current time
wait_for_text(r"(AM|PM)")
machine.screenshot("lomiri_greeter_launched")
# Advance to login part
machine.send_key("ret")
wait_for_text("${description}")
machine.screenshot("lomiri_greeter_login")
# Login
machine.send_chars("${pwInput}\n")
machine.wait_until_succeeds("pgrep -u ${user} -f 'lomiri --mode=full-shell'")
# Lomiri has started rendering output once it begins printing performance diagnostics
machine.wait_for_console_text("Last frame took")
# Look for datetime's clock, one of the last elements to load
wait_for_text(r"(AM|PM)")
machine.screenshot("lomiri_launched")
# Lomiri in desktop mode should use the correct keymap
with subtest("lomiri session keymap works"):
machine.send_key("ctrl-alt-t")
wait_for_text(r"(${user}|machine)")
machine.screenshot("terminal_opens")
machine.send_chars("touch ${pwInput}\n")
machine.wait_for_file("/home/alice/${pwOutput}", 10)
machine.send_key("alt-f4")
'';
}
);
}

View file

@@ -34,7 +34,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
node = {...}: {
environment.systemPackages = with pkgs; [
# remember to update mongodb.passthru.tests if you change this
mongodb-5_0
mongodb-7_0
];
};
};
@@ -42,7 +42,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
testScript = ''
node.start()
''
+ runMongoDBTest pkgs.mongodb-5_0
+ runMongoDBTest pkgs.mongodb-7_0
+ ''
node.shutdown()
'';

View file

@@ -1,12 +1,11 @@
import ./make-test-python.nix ({ pkgs, lib, ... }:
let
lualibs = [
luaLibs = [
pkgs.lua.pkgs.markdown
];
getPath = lib: type: "${lib}/share/lua/${pkgs.lua.luaversion}/?.${type}";
getLuaPath = lib: getPath lib "lua";
luaPath = lib.concatStringsSep ";" (map getLuaPath lualibs);
getLuaPath = lib: "${lib}/share/lua/${pkgs.lua.luaversion}/?.lua";
luaPath = lib.concatStringsSep ";" (map getLuaPath luaLibs);
in
{
name = "openresty-lua";

View file

@@ -31,6 +31,5 @@ let
'';
};
in with pkgs; {
unifi7 = makeAppTest unifi7;
unifi8 = makeAppTest unifi8;
}

View file

@@ -11,13 +11,13 @@
stdenv.mkDerivation (finalAttrs: {
pname = "praat";
version = "6.4.21";
version = "6.4.22";
src = fetchFromGitHub {
owner = "praat";
repo = "praat";
rev = "v${finalAttrs.version}";
hash = "sha256-2OeipesVdonv1XACbt9o99M9bxzxE0WQzCU2KWJmuzQ=";
hash = "sha256-bKWjazCCOIJm+VCAcnQGj3s0bbN4Ahx3RMNuLxZENXA=";
};
nativeBuildInputs = [

View file

@@ -28,13 +28,13 @@ let
in
stdenv.mkDerivation rec {
pname = "reaper";
version = "7.22";
version = "7.24";
src = fetchurl {
url = url_for_platform version stdenv.hostPlatform.qemuArch;
hash = if stdenv.hostPlatform.isDarwin then "sha256-dIRZCUIfqnGTxBaLzczwzD6hA/PyAxPqfa+FfCRKdu0=" else {
x86_64-linux = "sha256-aa2KcL8yZYG+Dki7J6U473E2BQgdACAIzRLtD9zuHV0=";
aarch64-linux = "sha256-NECEEUKtTQajl0MZK8/NsbhcuyihHOo0Q5Y5UpAAgrM=";
hash = if stdenv.hostPlatform.isDarwin then "sha256-g+Bh7M9r/NfkWGH6NSTw2s3Whoh7eP80rmAosdfj0Bg=" else {
x86_64-linux = "sha256-3suK57NKevCLTGclJmbX/Mm01pRzH/rb8CSByfKHUvM=";
aarch64-linux = "sha256-bCJSSc5d9doc86aqvpas42gHuP3eyWKJQSumKR+oZoY=";
}.${stdenv.hostPlatform.system};
};

View file

@@ -3,7 +3,8 @@
, fetchFromGitHub
, autoreconfHook
, alsa-lib
, python3
, perl
, pkg-config
, SDL2
, libXext
, Cocoa
@@ -11,19 +12,27 @@
stdenv.mkDerivation rec {
pname = "schismtracker";
version = "20240328";
version = "20240809";
src = fetchFromGitHub {
owner = pname;
repo = pname;
rev = version;
sha256 = "sha256-hoP/14lbqsuQ37oJDErPoQWWk04UshImmApCFrf5wno=";
sha256 = "sha256-J4al7XU+vvehDnp2fRrVesWyUN4i63g5btUkjarpXbk=";
};
# If we let it try to get the version from git, it will fail and fall back
# on running `date`, which will output the epoch, which is considered invalid
# in this assert: https://github.com/schismtracker/schismtracker/blob/a106b57e0f809b95d9e8bcf5a3975d27e0681b5a/schism/version.c#L112
postPatch = ''
substituteInPlace configure.ac \
--replace-fail 'git log' 'echo ${version} #'
'';
configureFlags = [ "--enable-dependency-tracking" ]
++ lib.optional stdenv.hostPlatform.isDarwin "--disable-sdltest";
nativeBuildInputs = [ autoreconfHook python3 ];
nativeBuildInputs = [ autoreconfHook perl pkg-config ];
buildInputs = [ SDL2 ]
++ lib.optionals stdenv.hostPlatform.isLinux [ alsa-lib libXext ]

View file

@@ -122,5 +122,7 @@ stdenv.mkDerivation rec {
license = licenses.mit;
maintainers = with maintainers; [ juaningan emmanuelrosa ];
platforms = [ "x86_64-linux" ];
# Requires OpenJFX 11 or 16, which are both EOL.
broken = true;
};
}

View file

@@ -8,7 +8,7 @@
, automake
, gettext
, libtool
, lowdown
, lowdown-unsandboxed
, protobuf
, unzip
, which
@@ -34,7 +34,7 @@ stdenv.mkDerivation rec {
# when building on darwin we need cctools to provide the correct libtool
# as libwally-core detects the host as darwin and tries to add the -static
# option to libtool, also we have to add the modified gsed package.
nativeBuildInputs = [ autoconf autogen automake gettext libtool lowdown protobuf py3 unzip which ]
nativeBuildInputs = [ autoconf autogen automake gettext libtool lowdown-unsandboxed protobuf py3 unzip which ]
++ lib.optionals stdenv.hostPlatform.isDarwin [ cctools darwin.autoSignDarwinBinariesHook ];
buildInputs = [ gmp libsodium sqlite zlib jq ];

View file

@@ -27,7 +27,7 @@
}:
let
# submodules
# submodules; revs are taken from monero repo's `/external` at the given monero version tag.
supercop = fetchFromGitHub {
owner = "monero-project";
repo = "supercop";
@@ -37,12 +37,11 @@ let
trezor-common = fetchFromGitHub {
owner = "trezor";
repo = "trezor-common";
rev = "bc28c316d05bf1e9ebfe3d7df1ab25831d98d168";
hash = "sha256-F1Hf1WwHqXMd/5OWrdkpomszACTozDuC7DQXW3p6248=";
rev = "bff7fdfe436c727982cc553bdfb29a9021b423b0";
hash = "sha256-VNypeEz9AV0ts8X3vINwYMOgO8VpNmyUPC4iY3OOuZI=";
};
in
stdenv.mkDerivation rec {
pname = "monero-cli";
version = "0.18.3.4";
@@ -111,14 +110,28 @@ stdenv.mkDerivation rec {
"-DCMAKE_CXX_FLAGS=-fpermissive"
];
outputs = [ "out" "source" ];
outputs = [
"out"
"source"
];
meta = {
description = "Private, secure, untraceable currency";
homepage = "https://getmonero.org/";
license = lib.licenses.bsd3;
platforms = lib.platforms.all;
maintainers = with lib.maintainers; [ rnhmjoj ];
platforms = with lib.platforms; linux;
# macOS/ARM has a working `monerod` (at least), but `monero-wallet-cli`
# segfaults on start after entering the wallet password, when built in release mode.
# Building the same revision in debug mode to root-cause the above problem doesn't work
# because of https://github.com/monero-project/monero/issues/9486
badPlatforms = [ "aarch64-darwin" ];
maintainers = with lib.maintainers; [
pmw
rnhmjoj
];
mainProgram = "monero-wallet-cli";
};
}

View file

@@ -7,13 +7,13 @@
stdenv.mkDerivation rec {
pname = "openvi";
version = "7.5.29";
version = "7.6.30";
src = fetchFromGitHub {
owner = "johnsonjh";
repo = "OpenVi";
rev = version;
hash = "sha256-ukNgTtVrYkL7Bf7O7ERyQ9TOR8ss/EHCTMbzHi3tkG4=";
hash = "sha256-P4w/PM9UmHmTzS9+WDK3x3MyZ7OoY2yO/Rx0vRMJuLI=";
};
buildInputs = [ ncurses perl ];

File diff suppressed because it is too large

View file

@@ -38,12 +38,12 @@
};
apex = buildGrammar {
language = "apex";
version = "0.0.0+rev=69330ef";
version = "0.0.0+rev=943a3eb";
src = fetchFromGitHub {
owner = "aheber";
repo = "tree-sitter-sfapex";
rev = "69330ef89fb6b7b2dd16b639d86811e9262c7369";
hash = "sha256-OO+KttgnPk18EtYmxNphn3if2p3QRNRrXQTYZOmmglc=";
rev = "943a3eb7f55733929ccafe06841087c3004cb4e0";
hash = "sha256-eTdNxvK3vcC7MiE5g0DgptuChYs7fv+WjEmxhwmUI4U=";
};
location = "apex";
meta.homepage = "https://github.com/aheber/tree-sitter-sfapex";
@@ -614,12 +614,12 @@
};
erlang = buildGrammar {
language = "erlang";
version = "0.0.0+rev=0dfcdf1";
version = "0.0.0+rev=f1919a3";
src = fetchFromGitHub {
owner = "WhatsApp";
repo = "tree-sitter-erlang";
rev = "0dfcdf18b35dd9cfcf92be42659794d07d819d88";
hash = "sha256-vGJrlugqmDHKMQtoDoFIyPMzWWZE8kUySBKEMDd8Kw0=";
rev = "f1919a34af3a9c79402c4a3d6c52986e9c2ea949";
hash = "sha256-0e01hr/QDZI+NSRoiTSQZftvpdCHKc6ZkEyxxbKIQyA=";
};
meta.homepage = "https://github.com/WhatsApp/tree-sitter-erlang";
};
@@ -735,12 +735,12 @@
};
fsharp = buildGrammar {
language = "fsharp";
version = "0.0.0+rev=f920105";
version = "0.0.0+rev=5202637";
src = fetchFromGitHub {
owner = "ionide";
repo = "tree-sitter-fsharp";
rev = "f920105eec2d574eb911d7a25c81cdaa079a3f72";
hash = "sha256-iBuxpTtVkd9KiVLiTWrPgTbkZP7Go5V8KhZVsCCUimE=";
rev = "5202637c203fcf8876affbd18b04ff43256d4c4a";
hash = "sha256-OjCwEhTACaVcnR/NyfUGZN/juLUHgqY6h+3DSrqUuiQ=";
};
location = "fsharp";
meta.homepage = "https://github.com/ionide/tree-sitter-fsharp";
@@ -1309,12 +1309,12 @@
};
java = buildGrammar {
language = "java";
version = "0.0.0+rev=245b05c";
version = "0.0.0+rev=490d878";
src = fetchFromGitHub {
owner = "tree-sitter";
repo = "tree-sitter-java";
rev = "245b05c6ba900fa708242835f6168ef76f9d951e";
hash = "sha256-C87uMEIoqXr6bYLCJAq6aiXsfH4+srVbNx7bMV9rseM=";
rev = "490d878cf33b0ad5ae7a7253ff30597a5bdc348e";
hash = "sha256-spf6dl7wvWuhJyhxwVU2YBLzt5xyNQDcBkk9g5cBiNQ=";
};
meta.homepage = "https://github.com/tree-sitter/tree-sitter-java";
};
@@ -1485,12 +1485,12 @@
};
latex = buildGrammar {
language = "latex";
version = "0.0.0+rev=1e4e303";
version = "0.0.0+rev=87e4059";
src = fetchFromGitHub {
owner = "latex-lsp";
repo = "tree-sitter-latex";
rev = "1e4e30342b7a3b3a24886a632fbac53035d98871";
hash = "sha256-A2uvHRoe9xtgsHSLYdZiztGLXdqXzsfw4BYeZ/Cmr4k=";
rev = "87e4059f01bed363230dc349f794ce4cc580e862";
hash = "sha256-bUTJuwqdQ1htZQnxy3/fEm9zE7G5WDjiDib/iRteLTo=";
};
generate = true;
meta.homepage = "https://github.com/latex-lsp/tree-sitter-latex";
@@ -1697,12 +1697,12 @@
};
mlir = buildGrammar {
language = "mlir";
version = "0.0.0+rev=02af5a1";
version = "0.0.0+rev=ccf732d";
src = fetchFromGitHub {
owner = "artagnon";
repo = "tree-sitter-mlir";
rev = "02af5a1a1cfa69a094e3136b10dfb602f968232e";
hash = "sha256-zCv47UvUIzdoJwQwKMrFyR1eMdU6ScSGfODdXomBapY=";
rev = "ccf732d3dbe6ca415a29b9be887c783111b297c7";
hash = "sha256-liYapDXD8R2nLHaDvynKAYvIfJPrXITVsn8IA/snclU=";
};
generate = true;
meta.homepage = "https://github.com/artagnon/tree-sitter-mlir";
@@ -1742,12 +1742,12 @@
};
nickel = buildGrammar {
language = "nickel";
version = "0.0.0+rev=88d836a";
version = "0.0.0+rev=ddaa2bc";
src = fetchFromGitHub {
owner = "nickel-lang";
repo = "tree-sitter-nickel";
rev = "88d836a24b3b11c8720874a1a9286b8ae838d30a";
hash = "sha256-IvlUwNO/wLLPuqCZf0NtSxMdDx+4ASYYOobklY/97aQ=";
rev = "ddaa2bc22355effd97c0d6b09ff5962705c6368d";
hash = "sha256-jL054OJj+1eXksNYOTTTFzZjwPqTFp06syC3TInN8rc=";
};
meta.homepage = "https://github.com/nickel-lang/tree-sitter-nickel";
};
@@ -1786,12 +1786,12 @@
};
nix = buildGrammar {
language = "nix";
version = "0.0.0+rev=fcf1857";
version = "0.0.0+rev=9ef77ce";
src = fetchFromGitHub {
owner = "cstrahan";
repo = "tree-sitter-nix";
rev = "fcf1857e254ab654e0fb73fe9706e33c52e79a5c";
hash = "sha256-ayiScuocBvhus3OUbQCSTxCdm/7+a61ATMpl3jFvCfY=";
rev = "9ef77ceefff61d31a63133d8d697f219ab62c841";
hash = "sha256-hBdruZbMKoPtcsoaMAVKsLJZree4WBiifRNCdzJLJUs=";
};
meta.homepage = "https://github.com/cstrahan/tree-sitter-nix";
};
@@ -1943,24 +1943,24 @@
};
php = buildGrammar {
language = "php";
version = "0.0.0+rev=74c6b0d";
version = "0.0.0+rev=07a0459";
src = fetchFromGitHub {
owner = "tree-sitter";
repo = "tree-sitter-php";
rev = "74c6b0d560c2660db4d9e8c76b681f538d494160";
hash = "sha256-mJh8MILlVSjG3bOvYPw2Wc7XFhL+ozrdvcnr1qR6pZE=";
rev = "07a04599ed9ac97f82c6383a24ae139a807930f3";
hash = "sha256-Nd3v1UtM/LqxJlcLpp6Y057NR7L9XJapfKdFC5b4SQw=";
};
location = "php";
meta.homepage = "https://github.com/tree-sitter/tree-sitter-php";
};
php_only = buildGrammar {
language = "php_only";
version = "0.0.0+rev=74c6b0d";
version = "0.0.0+rev=07a0459";
src = fetchFromGitHub {
owner = "tree-sitter";
repo = "tree-sitter-php";
rev = "74c6b0d560c2660db4d9e8c76b681f538d494160";
hash = "sha256-mJh8MILlVSjG3bOvYPw2Wc7XFhL+ozrdvcnr1qR6pZE=";
rev = "07a04599ed9ac97f82c6383a24ae139a807930f3";
hash = "sha256-Nd3v1UtM/LqxJlcLpp6Y057NR7L9XJapfKdFC5b4SQw=";
};
location = "php_only";
meta.homepage = "https://github.com/tree-sitter/tree-sitter-php";
@@ -2311,12 +2311,12 @@
};
readline = buildGrammar {
language = "readline";
version = "0.0.0+rev=3d4768b";
version = "0.0.0+rev=74addc9";
src = fetchFromGitHub {
owner = "ribru17";
repo = "tree-sitter-readline";
rev = "3d4768b04d7cfaf40533e12b28672603428b8f31";
hash = "sha256-kky3u5+NGOlxx8RxeMNszG+XJ6D36+z2us9c0nK/Jds=";
rev = "74addc90fc539d31d413c0c7cf7581997a7fa46e";
hash = "sha256-cbQnAPtgMnA41CTI9OyY8WYvdlJOC9g0ZMbitNSvtmI=";
};
meta.homepage = "https://github.com/ribru17/tree-sitter-readline";
};
@@ -2432,12 +2432,12 @@
};
ruby = buildGrammar {
language = "ruby";
version = "0.0.0+rev=a66579f";
version = "0.0.0+rev=0b47296";
src = fetchFromGitHub {
owner = "tree-sitter";
repo = "tree-sitter-ruby";
rev = "a66579f70d6f50ffd81a16fc3d3358e2ac173c88";
hash = "sha256-ApuNco5q0hq4/36D7yWv87+d3h33Y9pKtdTUox4tIiw=";
rev = "0b4729672f9aec4810c01a0f971541dcb433fef5";
hash = "sha256-+FH/L028b/rpKypu0zdUoMYWiYMVkUIZXM3lmmN+nak=";
};
meta.homepage = "https://github.com/tree-sitter/tree-sitter-ruby";
};
@@ -2454,12 +2454,12 @@
};
scala = buildGrammar {
language = "scala";
version = "0.0.0+rev=ec13dd6";
version = "0.0.0+rev=2cfbb6e";
src = fetchFromGitHub {
owner = "tree-sitter";
repo = "tree-sitter-scala";
rev = "ec13dd674bb8dd89213e0d6b1fe45efb68d5878f";
hash = "sha256-ireSo04kG2RMlCZD1hf6BJcjT7eXjYdOqOsoMtQAwKQ=";
rev = "2cfbb6e3fcdfd51e0d477a43cc37ae8c6f87dc2e";
hash = "sha256-8s5Li+fuHyr19KYaC/UzXc7ASLimwAu1VS+8lc5rNLA=";
};
meta.homepage = "https://github.com/tree-sitter/tree-sitter-scala";
};
@@ -2499,12 +2499,12 @@
};
sflog = buildGrammar {
language = "sflog";
version = "0.0.0+rev=69330ef";
version = "0.0.0+rev=943a3eb";
src = fetchFromGitHub {
owner = "aheber";
repo = "tree-sitter-sfapex";
rev = "69330ef89fb6b7b2dd16b639d86811e9262c7369";
hash = "sha256-OO+KttgnPk18EtYmxNphn3if2p3QRNRrXQTYZOmmglc=";
rev = "943a3eb7f55733929ccafe06841087c3004cb4e0";
hash = "sha256-eTdNxvK3vcC7MiE5g0DgptuChYs7fv+WjEmxhwmUI4U=";
};
location = "sflog";
meta.homepage = "https://github.com/aheber/tree-sitter-sfapex";
@@ -2522,12 +2522,12 @@
};
slint = buildGrammar {
language = "slint";
version = "0.0.0+rev=34ccfd5";
version = "0.0.0+rev=4e2765d";
src = fetchFromGitHub {
owner = "slint-ui";
repo = "tree-sitter-slint";
rev = "34ccfd58d3baee7636f62d9326f32092264e8407";
hash = "sha256-2R+TxjM3Pd2a9pyr2SwZd9+YYj1o8KsS+4n5dFxEMMM=";
rev = "4e2765d4cac1f03ada6f635eeb6008d1d0aff5a3";
hash = "sha256-cEitYvrK9P5McbqQAH/PmbD5W0pYULwj3eP9lKXSOTE=";
};
meta.homepage = "https://github.com/slint-ui/tree-sitter-slint";
};
@@ -2577,24 +2577,24 @@
};
soql = buildGrammar {
language = "soql";
version = "0.0.0+rev=69330ef";
version = "0.0.0+rev=943a3eb";
src = fetchFromGitHub {
owner = "aheber";
repo = "tree-sitter-sfapex";
rev = "69330ef89fb6b7b2dd16b639d86811e9262c7369";
hash = "sha256-OO+KttgnPk18EtYmxNphn3if2p3QRNRrXQTYZOmmglc=";
rev = "943a3eb7f55733929ccafe06841087c3004cb4e0";
hash = "sha256-eTdNxvK3vcC7MiE5g0DgptuChYs7fv+WjEmxhwmUI4U=";
};
location = "soql";
meta.homepage = "https://github.com/aheber/tree-sitter-sfapex";
};
sosl = buildGrammar {
language = "sosl";
version = "0.0.0+rev=69330ef";
version = "0.0.0+rev=943a3eb";
src = fetchFromGitHub {
owner = "aheber";
repo = "tree-sitter-sfapex";
rev = "69330ef89fb6b7b2dd16b639d86811e9262c7369";
hash = "sha256-OO+KttgnPk18EtYmxNphn3if2p3QRNRrXQTYZOmmglc=";
rev = "943a3eb7f55733929ccafe06841087c3004cb4e0";
hash = "sha256-eTdNxvK3vcC7MiE5g0DgptuChYs7fv+WjEmxhwmUI4U=";
};
location = "sosl";
meta.homepage = "https://github.com/aheber/tree-sitter-sfapex";
@@ -2678,12 +2678,12 @@
};
styled = buildGrammar {
language = "styled";
version = "0.0.0+rev=b729198";
version = "0.0.0+rev=764af55";
src = fetchFromGitHub {
owner = "mskelton";
repo = "tree-sitter-styled";
rev = "b729198642b3058d4ea0f864d86efb271d594595";
hash = "sha256-9hj6l3eI5p7q1XQihM19deb7+TdLVscIM31TbDRcqo8=";
rev = "764af55fc6b8e5ae177eb272f5c5de6238db23e6";
hash = "sha256-Zh35KWOYQbtsG3/F7g68dniBu5UZTA6ZuiX2GA0E2ww=";
};
meta.homepage = "https://github.com/mskelton/tree-sitter-styled";
};
@@ -2698,6 +2698,18 @@
};
meta.homepage = "https://github.com/madskjeldgaard/tree-sitter-supercollider";
};
superhtml = buildGrammar {
language = "superhtml";
version = "0.0.0+rev=b684bbe";
src = fetchFromGitHub {
owner = "kristoff-it";
repo = "superhtml";
rev = "b684bbe28ecd740a7110ead5674355770186ca9c";
hash = "sha256-9Aw51LvTIBzptXuW3rEco/wTOdSADEhWJ/sI9OHr854=";
};
location = "tree-sitter-superhtml";
meta.homepage = "https://github.com/kristoff-it/superhtml";
};
surface = buildGrammar {
language = "surface";
version = "0.0.0+rev=f4586b3";
@@ -2722,12 +2734,12 @@
};
swift = buildGrammar {
language = "swift";
version = "0.0.0+rev=032930d";
version = "0.0.0+rev=1466855";
src = fetchFromGitHub {
owner = "alex-pinkus";
repo = "tree-sitter-swift";
rev = "032930d6218d8ae23bde074cf29ce8d276b87533";
hash = "sha256-VhQ+OwkqOVJH9/R2eDVkCJbmh50EmZjVGX8Pk4uMGBw=";
rev = "14668554259c5a82fc0c8ca825aa3bb895034c67";
hash = "sha256-+coXjHJSa5jKOx3DE4zD4Crqp8NWn8jcsrK/eEgZQtk=";
};
generate = true;
meta.homepage = "https://github.com/alex-pinkus/tree-sitter-swift";
@@ -2789,12 +2801,12 @@
};
tact = buildGrammar {
language = "tact";
version = "0.0.0+rev=d168040";
version = "0.0.0+rev=09c57b6";
src = fetchFromGitHub {
owner = "tact-lang";
repo = "tree-sitter-tact";
rev = "d16804029968f53f26f5afc695166a55bb0b68b2";
hash = "sha256-naug7uJeMQ8mFje6ZgOJ/3AbPlCOrCUak0u1RQ25Ky4=";
rev = "09c57b6b9759560b4d067e0546c9953ee0e065da";
hash = "sha256-WyCBuWPTYzNEApxtACTNt7StYoaSXIR9oqrOUlIquOY=";
};
meta.homepage = "https://github.com/tact-lang/tree-sitter-tact";
};
@@ -2823,12 +2835,12 @@
};
templ = buildGrammar {
language = "templ";
version = "0.0.0+rev=80d1a04";
version = "0.0.0+rev=e3e894e";
src = fetchFromGitHub {
owner = "vrischmann";
repo = "tree-sitter-templ";
rev = "80d1a04e6bf3ced1c924bcb05527aa2eaf3f6239";
hash = "sha256-BY+j+0kMWxGbtwFk96SWHZA9ugRz6E7pRZOOM5j1XKA=";
rev = "e3e894ef9e490c3d36d94a51458ec55480991730";
hash = "sha256-uuPK/bWAAaoVGvWk4so+AulpaI1KAsyZwe5FzmPqWrg=";
};
meta.homepage = "https://github.com/vrischmann/tree-sitter-templ";
};
@@ -2846,12 +2858,12 @@
};
textproto = buildGrammar {
language = "textproto";
version = "0.0.0+rev=8dacf02";
version = "0.0.0+rev=d900077";
src = fetchFromGitHub {
owner = "PorterAtGoogle";
repo = "tree-sitter-textproto";
rev = "8dacf02aa402892c91079f8577998ed5148c0496";
hash = "sha256-MpQTrNjjNO2Bj5qR6ESwI9SZtJPmcS6ckqjAR0qaLx8=";
rev = "d900077aef9f5dcb0d47c86be33585013ed5db9a";
hash = "sha256-PZMhYhIpGa7Y50jxvXZ0Z5l9e26P5q55sC18ptDi/uU=";
};
meta.homepage = "https://github.com/PorterAtGoogle/tree-sitter-textproto";
};
@@ -2879,12 +2891,12 @@
};
tlaplus = buildGrammar {
language = "tlaplus";
version = "0.0.0+rev=b9e3978";
version = "0.0.0+rev=da9cf97";
src = fetchFromGitHub {
owner = "tlaplus-community";
repo = "tree-sitter-tlaplus";
rev = "b9e3978f363b3f8884c886a01d15e41bd14d30bd";
hash = "sha256-xC0iA7QvU/72RoqyW5oPmbVkTszPNraacwW6N8TELwo=";
rev = "da9cf9793686e236327aadfbad449414c895bf84";
hash = "sha256-VlYgKg9K/veFqxHWqF3nEYsrRGub2xK9txFK71Kn9JA=";
};
meta.homepage = "https://github.com/tlaplus-community/tree-sitter-tlaplus";
};
@@ -3003,12 +3015,12 @@
};
typst = buildGrammar {
language = "typst";
version = "0.0.0+rev=abe60cb";
version = "0.0.0+rev=8b8b16e";
src = fetchFromGitHub {
owner = "uben0";
repo = "tree-sitter-typst";
rev = "abe60cbed7986ee475d93f816c1be287f220c5d8";
hash = "sha256-hwM1oEzABe9sqY0mpDXSfwT+tQsLV5ZNSG8yJhES6Qg=";
rev = "8b8b16ef1b40cbecbe3f754b1c1c966b5a0904fe";
hash = "sha256-eoaIt5yy0mIodjYq1sy6X7uq4ZhQXlbndMThAlCAifs=";
};
meta.homepage = "https://github.com/uben0/tree-sitter-typst";
};
@@ -3036,12 +3048,12 @@
};
unison = buildGrammar {
language = "unison";
version = "0.0.0+rev=59d36a0";
version = "0.0.0+rev=bc06e1e";
src = fetchFromGitHub {
owner = "kylegoetz";
repo = "tree-sitter-unison";
rev = "59d36a09282be7e4d3374854126590f3dcebee6e";
hash = "sha256-89vFguMlPfKzQ4nmMNdTNFcEiCYH0eSws87Llm88e+I=";
rev = "bc06e1eb100e1c0fab9bd89a9ca55d646ac80fc4";
hash = "sha256-NbsUvRkFRd/khn37qYmPvq9ynzFvnr1zhwh8zPDIjxE=";
};
generate = true;
meta.homepage = "https://github.com/kylegoetz/tree-sitter-unison";
@@ -3070,12 +3082,12 @@
};
v = buildGrammar {
language = "v";
version = "0.0.0+rev=4f93826";
version = "0.0.0+rev=bc5b3ca";
src = fetchFromGitHub {
owner = "vlang";
repo = "v-analyzer";
rev = "4f93826aeb31066eb241f4ccbca61f052239803f";
hash = "sha256-Tl4q6QksNu7Pm0Pt8rJka6o55LNN2GN6zK732XmLXb8=";
rev = "bc5b3caa85f7a8d4597f51aeaf92b83162ed6b33";
hash = "sha256-44WUptfNjp4hsHa3BQLdzjRIiCyppzNNOqoqU/rJGNA=";
};
location = "tree_sitter_v";
meta.homepage = "https://github.com/vlang/v-analyzer";
@@ -3214,12 +3226,12 @@
};
wit = buildGrammar {
language = "wit";
version = "0.0.0+rev=c52f0b0";
version = "0.0.0+rev=81490b4";
src = fetchFromGitHub {
owner = "liamwh";
repo = "tree-sitter-wit";
rev = "c52f0b07786603df17ad0197f6cef680f312eb2c";
hash = "sha256-0MyRMippVOdb0RzyJQhPwX7GlWzFV9Z+/mghYuUW7NU=";
rev = "81490b4e74c792369e005f72b0d46fe082d3fed2";
hash = "sha256-L8dIOVJ3L2TXg1l4BXMOQeOsNxVkGPZimG619n3kHZE=";
};
meta.homepage = "https://github.com/liamwh/tree-sitter-wit";
};
@@ -3301,4 +3313,28 @@
};
meta.homepage = "https://github.com/tree-sitter-grammars/tree-sitter-zig";
};
ziggy = buildGrammar {
language = "ziggy";
version = "0.0.0+rev=42b6f5d";
src = fetchFromGitHub {
owner = "kristoff-it";
repo = "ziggy";
rev = "42b6f5d7320340bc5903c4c29d34065e8517a549";
hash = "sha256-08y6Km7tO9YhJBmWXvPVjiku1QRRNcmJ2h2EbMa6Q/g=";
};
location = "tree-sitter-ziggy";
meta.homepage = "https://github.com/kristoff-it/ziggy";
};
ziggy_schema = buildGrammar {
language = "ziggy_schema";
version = "0.0.0+rev=42b6f5d";
src = fetchFromGitHub {
owner = "kristoff-it";
repo = "ziggy";
rev = "42b6f5d7320340bc5903c4c29d34065e8517a549";
hash = "sha256-08y6Km7tO9YhJBmWXvPVjiku1QRRNcmJ2h2EbMa6Q/g=";
};
location = "tree-sitter-ziggy-schema";
meta.homepage = "https://github.com/kristoff-it/ziggy";
};
}
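For reference, a hedged sketch of how the grammars added above (superhtml, ziggy, ziggy_schema) could be selected through the usual nvim-treesitter helper; this assumes the standard nixpkgs `withPlugins` interface and is not part of this diff:

# Sketch only: build nvim-treesitter bundled with the newly added grammars.
with import <nixpkgs> { };
vimPlugins.nvim-treesitter.withPlugins (p: [
  p.superhtml
  p.ziggy
  p.ziggy_schema
])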

View file

@@ -263,6 +263,10 @@ in
dependencies = with self; [ nvim-cmp copilot-vim ];
};
cmp-ctags = super.cmp-ctags.overrideAttrs {
dependencies = with self; [ nvim-cmp ];
};
cmp-dap = super.cmp-dap.overrideAttrs {
dependencies = with self; [ nvim-cmp nvim-dap ];
};
@@ -380,12 +384,12 @@ in
codeium-nvim = let
# Update according to https://github.com/Exafunction/codeium.nvim/blob/main/lua/codeium/versions.json
codeiumVersion = "1.16.18";
codeiumVersion = "1.20.9";
codeiumHashes = {
x86_64-linux = "sha256-/m+t4abPgVWeGpfDkPm5DGCIXm1LoM5znHfES9lotAo=";
aarch64-linux = "sha256-0kR799yuxSFmyedJ14f5/EqOiFHs9cWjeJKvDIpIRl0=";
x86_64-darwin = "sha256-7Go5qZVAe2UHn547HZG4fmh84iF2r15+0IIlJK72Fqg=";
aarch64-darwin = "sha256-fe4GrgLRr66Qmme3p0X5BEwvKZhqG1aiE8xs5A1Dt6E=";
x86_64-linux = "sha256-IeNK7UQtOhqC/eQv7MAya4jB1WIGykSR7IgutZatmHM=";
aarch64-linux = "sha256-ujTFki/3V79El2WCkG0PJhbaMT0knC9mrS9E7Uv9HD4=";
x86_64-darwin = "sha256-r2KloEQsUku9sk8h76kwyQuMTHcq/vwfTSK2dkiXDzE=";
aarch64-darwin = "sha256-1jNH0Up8mAahDgvPF6g42LV+RVDVsPqDM54lE2KYY48=";
};
codeium' = codeium.overrideAttrs rec {

View file

@@ -146,6 +146,7 @@ https://github.com/dmitmel/cmp-cmdline-history/,HEAD,
https://github.com/PaterJason/cmp-conjure/,,
https://github.com/davidsierradz/cmp-conventionalcommits/,HEAD,
https://github.com/hrsh7th/cmp-copilot/,HEAD,
https://github.com/delphinus/cmp-ctags/,HEAD,
https://github.com/rcarriga/cmp-dap/,HEAD,
https://github.com/uga-rosa/cmp-dictionary/,HEAD,
https://github.com/dmitmel/cmp-digraphs/,HEAD,
@@ -878,6 +879,7 @@ https://github.com/vladdoster/remember.nvim/,,
https://github.com/filipdutescu/renamer.nvim/,,
https://github.com/MeanderingProgrammer/render-markdown.nvim/,,
https://github.com/gabrielpoca/replacer.nvim/,HEAD,
https://github.com/stevearc/resession.nvim/,HEAD,
https://github.com/NTBBloodbath/rest.nvim/,,
https://github.com/vim-scripts/restore_view.vim/,HEAD,restore-view-vim
https://github.com/gu-fan/riv.vim/,,

View file

@@ -0,0 +1,30 @@
{
lib,
vscode-utils,
jq,
moreutils,
pandoc,
}:
vscode-utils.buildVscodeMarketplaceExtension {
mktplcRef = {
name = "vscode-pandoc";
publisher = "chrischinchilla";
version = "0.4.8";
hash = "sha256-+U6AtT2wf1mE92IR+mv4aKD9/78ULus2GuwwgxdCvBA=";
};
nativeBuildInputs = [
jq
moreutils
];
postInstall = ''
jq '.contributes.configuration.properties."pandoc.executable".default = "${lib.getExe pandoc}"' $out/$installPrefix/package.json | sponge $out/$installPrefix/package.json
'';
meta = {
description = "Converts Markdown files to pdf, docx, or html files using pandoc";
homepage = "https://github.com/ChrisChinchilla/vscode-pandoc#readme";
downloadPage = "https://marketplace.visualstudio.com/items?itemName=yzane.markdown-pdf";
license = lib.licenses.mit;
maintainers = with lib.maintainers; [ pandapip1 ];
};
}
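A brief usage sketch: once the extension above is exposed as `vscode-extensions.chrischinchilla.vscode-pandoc` (as registered later in this diff), it can be bundled into an editor build with the usual nixpkgs wrapper; this is illustrative and not part of the diff:

# Sketch only: a VS Code build that includes the new extension.
with import <nixpkgs> { };
vscode-with-extensions.override {
  vscodeExtensions = with vscode-extensions; [
    chrischinchilla.vscode-pandoc
  ];
}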

View file

@@ -905,6 +905,8 @@ let
};
};
chrischinchilla.vscode-pandoc = callPackage ./chrischinchilla.vscode-pandoc { };
christian-kohler.npm-intellisense = buildVscodeMarketplaceExtension {
mktplcRef = {
name = "npm-intellisense";
@@ -1642,8 +1644,8 @@ let
mktplcRef = {
name = "elixir-ls";
publisher = "JakeBecker";
version = "0.23.1";
hash = "sha256-rwpaixQbuxVkH4wlKPG4Qk69IylwjfCtyfUcqCuN/e8=";
version = "0.24.0";
hash = "sha256-zNiKtOeZEO9zVpyF4AE/3FjiEy4jtCSCjB9T8e8PjRE=";
};
meta = {
changelog = "https://marketplace.visualstudio.com/items/JakeBecker.elixir-ls/changelog";
@@ -2037,8 +2039,8 @@ let
mktplcRef = {
publisher = "github";
name = "copilot";
version = "1.234.1133"; # compatible with vscode ^1.93.1
hash = "sha256-kRQIB4ozN8f+JPG2U6tA/u0r3/J05kYfMuksaJrumZM=";
version = "1.236.0"; # compatible with vscode ^1.94
hash = "sha256-ozJwByuSjROWSxfrapcyxDkI7xgcjqf/IKtUfEC+MGk=";
};
meta = {
@@ -2054,8 +2056,8 @@ let
mktplcRef = {
publisher = "github";
name = "copilot-chat";
version = "0.21.2024090602"; # latest compatible with vscode ^1.93
hash = "sha256-9wl/orFbf1OFwGnF1uLfyOOtO2v5k2H1aUMBtngXDfs=";
version = "0.22.2024100702"; # latest compatible with vscode ^1.94
hash = "sha256-n/ecEnxz3LiTx9MuHO8AMIWBJPNNxQb6vghlG/hPMUY=";
};
meta = {
description = "GitHub Copilot Chat is a companion extension to GitHub Copilot that houses experimental chat features";
@@ -5393,6 +5395,8 @@ let
};
};
yzane.markdown-pdf = callPackage ./yzane.markdown-pdf { };
yzhang.dictionary-completion = buildVscodeMarketplaceExtension {
mktplcRef = {
publisher = "yzhang";

View file

@@ -0,0 +1,31 @@
{
lib,
vscode-utils,
jq,
moreutils,
ungoogled-chromium,
}:
vscode-utils.buildVscodeMarketplaceExtension {
mktplcRef = {
name = "markdown-pdf";
publisher = "yzane";
version = "1.5.0";
hash = "sha256-aiifZgHXC4GUEbkKAbLc0p/jUZxp1jF/J1Y/KIyvLIE=";
};
nativeBuildInputs = [
jq
moreutils
];
postInstall = ''
jq '.contributes.configuration.properties."markdown-pdf.executablePath".default = "${lib.getExe ungoogled-chromium}"' $out/$installPrefix/package.json | sponge $out/$installPrefix/package.json
'';
meta = {
description = "Converts Markdown files to pdf, html, png or jpeg files";
homepage = "https://github.com/yzane/vscode-markdown-pdf#readme";
changelog = "https://github.com/yzane/vscode-markdown-pdf/blob/master/CHANGELOG.md";
downloadPage = "https://marketplace.visualstudio.com/items?itemName=yzane.markdown-pdf";
license = lib.licenses.mit;
maintainers = with lib.maintainers; [ pandapip1 ];
};
}

View file

@@ -236,7 +236,10 @@ in
let
vscodeRipgrep =
if stdenv.hostPlatform.isDarwin then
"Contents/Resources/app/node_modules.asar.unpacked/@vscode/ripgrep/bin/rg"
if lib.versionAtLeast version "1.94.0" then
"Contents/Resources/app/node_modules/@vscode/ripgrep/bin/rg"
else
"Contents/Resources/app/node_modules.asar.unpacked/@vscode/ripgrep/bin/rg"
else
"resources/app/node_modules/@vscode/ripgrep/bin/rg";
in

View file

@@ -30,21 +30,21 @@ let
archive_fmt = if stdenv.hostPlatform.isDarwin then "zip" else "tar.gz";
sha256 = {
x86_64-linux = "1adwsm4n934a5z3hnsj9k7mi2l4npl499q8jzk2xhbbpqhkvd96a";
x86_64-darwin = "04cvhhxx7s14z5794gn3pwd482cswpqyrmb1qcwm797cz1rz29z5";
aarch64-linux = "1fca5rir2bkf4wqrs56qhv3kwrxivx17pa5brxp1k4k8a9jmhy7k";
aarch64-darwin = "1mwymizy2a6m9fj3r00h762283fwrkhl9kv5607r0q7widggfg0j";
armv7l-linux = "16ndp0mcfb05wfarpq3nxp3bnac1s1yay596mwjmwbwv44qcq40b";
x86_64-linux = "11d9qqfb5kh5zsc7xd6h5xsywacir5z08l2snj0cz2cb0nji9xhj";
x86_64-darwin = "0rbwvvakh1b5iqca49hcmqlfq4g0j067rrphrh0yx7wdyr6kmwg2";
aarch64-linux = "0vrvcy1p5lrdy2lww42w32vr79075vpkwj4q8wfqzd7x72vmhfci";
aarch64-darwin = "03wccm854v9va50x91kp00a16r483zpndayhlwy1fm4n0wdy6iw8";
armv7l-linux = "0b9r78mz5djvv6n82isn2jqb4bwa41hqyxxc9arhrpvpj5w65rla";
}.${system} or throwSystem;
in
callPackage ./generic.nix rec {
# Please backport all compatible updates to the stable release.
# This is important for the extension ecosystem.
version = "1.94.0";
version = "1.94.1";
pname = "vscode" + lib.optionalString isInsiders "-insiders";
# This is used for VS Code - Remote SSH test
rev = "d78a74bcdfad14d5d3b1b782f87255d802b57511";
rev = "e10f2369d0d9614a452462f2e01cdc4aa9486296";
executableName = "code" + lib.optionalString isInsiders "-insiders";
longName = "Visual Studio Code" + lib.optionalString isInsiders " - Insiders";
@@ -68,7 +68,7 @@ in
src = fetchurl {
name = "vscode-server-${rev}.tar.gz";
url = "https://update.code.visualstudio.com/commit:${rev}/server-linux-x64/stable";
sha256 = "1iqglh4wx4wc80ihzcw4is7hd49s6kxpg9fz357r57a2679q0qw6";
sha256 = "094klvp32475f6rsapxkhgsm8cmjmpq4qp3lx2b1vgf3xzl7j9nw";
};
};

View file

@@ -49,13 +49,13 @@ in
stdenv.mkDerivation (finalAttrs: {
pname = "imagemagick";
version = "7.1.1-38";
version = "7.1.1-39";
src = fetchFromGitHub {
owner = "ImageMagick";
repo = "ImageMagick";
rev = finalAttrs.version;
hash = "sha256-dyk9kCH1w76Jhy/yBhVFLthTKYaMgXLBn7QGWAFS0XU=";
hash = "sha256-3NUl0q/j3dBdNBtLH+69vh0elobBnTOvqQpC/2KwGBU=";
};
outputs = [ "out" "dev" "doc" ]; # bin/ isn't really big

View file

@@ -80,5 +80,8 @@ stdenv.mkDerivation rec {
maintainers = with maintainers; [ bcdarwin pbsds ];
platforms = with platforms; unix;
mainProgram = "f3d";
# error: use of undeclared identifier 'NSMenuItem'
# adding AppKit does not solve it
broken = with stdenv.hostPlatform; isDarwin && isx86_64;
};
}

View file

@@ -18,13 +18,13 @@
python3Packages.buildPythonApplication rec {
pname = "gscreenshot";
version = "3.6.2";
version = "3.6.3";
src = fetchFromGitHub {
owner = "thenaterhood";
repo = "${pname}";
rev = "refs/tags/v${version}";
sha256 = "sha256-dYmdM9QtemVKggEmMMcprVIM1fe02jQOyBPniy7p9ns=";
sha256 = "sha256-fpxKhgLpXbuUhALzF6n4v3FLcLaqbqLLxwQJE/wJrAY=";
};
# needed for wrapGAppsHook3 to function

View file

@@ -20,13 +20,13 @@
crystal.buildCrystalPackage rec {
pname = "Collision";
version = "3.8.1";
version = "3.9.0";
src = fetchFromGitHub {
owner = "GeopJr";
repo = "Collision";
rev = "v${version}";
hash = "sha256-55qCHc+snMAUFAT31Z8EPtJ/HLrnv1BveCEzjkn7N5g=";
hash = "sha256-c/74LzDM63w5zW8z2T8o4Efvuzj791/zTSKEDN32uak=";
};
postPatch = ''

View file

@@ -11,13 +11,13 @@
};
gi-crystal = {
url = "https://github.com/hugopl/gi-crystal.git";
rev = "v0.22.3";
sha256 = "1xyj5bf3l2i1yzqxb8yyj0fc3kwi9nnd57n5dhs5xm9jxzcvw1kk";
rev = "v0.24.0";
sha256 = "0x356xn35008l573qhyl1sdddc9cc5i3bsa4c7865kgq9521ifyh";
};
gtk4 = {
url = "https://github.com/hugopl/gtk4.cr.git";
rev = "v0.16.1";
sha256 = "1cqkbh072y70l8g0p040vf50k920p32ry1larnwn9mqabd74jwaj";
rev = "v0.17.0";
sha256 = "0lv3nvsanxi4g2322zvkf1jxx5zgzaapk228vcw2cl0ja1drm06d";
};
harfbuzz = {
url = "https://github.com/hugopl/harfbuzz.cr.git";

View file

@@ -21,8 +21,8 @@
"sha256": "01dgvlvwbhwz7822gp6z5xn6w3k51q09i6qzns2i4ixmjh45wscs"
},
"diet-ng": {
"version": "1.8.1",
"sha256": "0kh8haw712xkd3f07s5x5g12nmmkv0y1lk2cqh66298fc5mgj4sv"
"version": "1.8.2",
"sha256": "0hwm8dsyw7xb9d540ks314vzxibn4ri7b4m2gagqbpmzspvd2slv"
},
"dportals": {
"version": "0.1.0",
@@ -37,12 +37,12 @@
"sha256": "0p5vmkw29ksh5wdxz1ijms1wblq288pv15vnbl93z7q2vgnq995w"
},
"eventcore": {
"version": "0.9.30",
"sha256": "1n8wdcjhas0y99pf9fvwwsydkmy9g7gvfjhlwpjh158c7pfjwlaq"
"version": "0.9.34",
"sha256": "0znrcmxdr65gk8bwrknhm530kicznia4xb09h5jv42sxnv3cjkjw"
},
"facetrack-d": {
"version": "0.7.8",
"sha256": "1414wvh0kn1rps5r16ir92sqfj8a7na1gd71ds81jkq8arkm17j0"
"version": "0.8.0",
"sha256": "0p04yd50sgjb9n9gdp2yjgvlm8kkld2gl5ivz36npjnchj8k5a8i"
},
"fghj": {
"version": "1.0.2",
@@ -65,24 +65,16 @@
"sha256": "0dl7n4myxp1s3b32v2s975k76gs90wr2nw6ac5jq9hsgzhp1ix0h"
},
"inmath": {
"version": "1.0.6",
"sha256": "0kzk55ilbnl6qypjk60zwd5ibys5n47128hbbr0mbc7bpj9ppfg4"
"version": "1.3.0",
"sha256": "1bmfsnlpm3lb085cs29h63l4fmfr0xr9iyfd0wrg5i87difshpw6"
},
"inochi2d": {
"version": "0.8.4",
"sha256": "1bj0c6i9kcw1vfm6lf8lyxpf1lhhslg3f182jycdmzms15i3jb3y"
"version": "0.8.6",
"sha256": "0xhidp1y91cidh3g1cc5v7psb5kfy17ars7k7cplnywhjlcqqk70"
},
"kra-d": {
"version": "0.5.5",
"sha256": "0dffmf084ykz19y084v936r3f74613d0jifj0wb3xibfcq9mwxqz"
},
"libasync": {
"version": "0.8.6",
"sha256": "0hhk5asfdccby8ky77a25qn7dfmfdmwyzkrg3zk064bicmgdwlnj"
},
"memutils": {
"version": "1.0.10",
"sha256": "0hm31birbw59sw1bi9syjhbcdgwwwyyx6r9jg7ar9i6a74cjr52c"
"version": "0.5.6",
"sha256": "1lp3mf39qfxn6cayznc4nkk24smnd2m5sg8skl9pnd4x85is6zdr"
},
"mir-algorithm": {
"version": "3.22.1",
@@ -100,9 +92,13 @@
"version": "2.2.19",
"sha256": "0ad9ahvyrv5h38aqwn3zvlrva3ikfq28dfhpg2lwwgm31ymzvqpb"
},
"numem": {
"version": "0.11.3",
"sha256": "00rm3cg5i714ncww8yxsbzf1y1bf6r8d0yx6i38ac2x7090arvjm"
},
"openssl": {
"version": "3.3.3",
"sha256": "1fwhd5fkvgbqf3y8gwmrnd42kzi4k3mibpxijw5j82jxgfp1rzsf"
"version": "3.3.4",
"sha256": "17s71yfyhb9jyym2nldj23ikazwbbrmh6ply33mg888rd6dxnhyy"
},
"openssl-static": {
"version": "1.0.5+3.0.8",
@@ -133,8 +129,8 @@
"sha256": "12mfm49bjnh2pvm51dzna625kzgwznm9kcv6qhazc4il9j0224wd"
},
"vibe-core": {
"version": "2.8.4",
"sha256": "1pik6vympgwxpyxb75g1f8409cd6hw952gbflqvwaj18shz6dwjm"
"version": "2.9.3",
"sha256": "032q1gkm7l6blj5y3yiwk205m12svp4bv8k743crkd8d1xhlrrvi"
},
"vibe-d": {
"version": "0.9.8",

View file

@@ -22,13 +22,13 @@ in
inochi-creator = mkGeneric rec {
pname = "inochi-creator";
appname = "Inochi Creator";
version = "0.8.5";
version = "0.8.6";
src = fetchFromGitHub {
owner = "Inochi2D";
repo = "inochi-creator";
rev = "v${version}";
hash = "sha256-qrSHyvFE55xRbcA79lngOHJOdv54rNlUTHlxT9jjPEY=";
hash = "sha256-9d3j5ZL6rGOjN1GUpCIfbjby0mNMvOK7BJbHYgwLY2k=";
};
dubLock = ./creator-dub-lock.json;
@@ -54,15 +54,21 @@ in
inochi-session = mkGeneric rec {
pname = "inochi-session";
appname = "Inochi Session";
version = "0.8.4";
version = "0.8.7";
src = fetchFromGitHub {
owner = "Inochi2D";
repo = "inochi-session";
rev = "v${version}";
hash = "sha256-BRA5qODHhyHBeZYT5MQwcFmr/zVokfO5SrbcbQa6w7w=";
hash = "sha256-FcgzTCpD+L50MsPP90kfL6h6DEUtiYkUV1xKww1pQfg=";
};
patches = [
# Dynamically load Lua to get around the linker error on aarch64-linux.
# https://github.com/Inochi2D/inochi-session/pull/60
./session-dynamic-lua.patch
];
dubLock = ./session-dub-lock.json;
preFixup = ''
@@ -72,8 +78,8 @@ in
dontStrip = true; # symbol lookup error: undefined symbol: , version
meta = {
# darwin has slightly different build steps, aarch fails to build because of some lua related error
broken = stdenv.hostPlatform.isDarwin || stdenv.hostPlatform.isAarch64;
# darwin has slightly different build steps
broken = stdenv.hostPlatform.isDarwin;
changelog = "https://github.com/Inochi2D/inochi-session/releases/tag/${src.rev}";
description = "An application that allows streaming with Inochi2D puppets";
};

View file

@@ -14,6 +14,7 @@
freetype,
SDL2,
zenity,
luajit_2_1,
libGL,
builderArgs,
@ -99,7 +100,7 @@ buildDubPackage (
. gentl.sh
# Use the fake git to generate version info
dub build --skip-registry=all --compiler=ldc2 --build=release --config=meta
dub build --skip-registry=all --compiler=ldc2 --build=release --config=update-version
'';
# Use the "barebones" configuration so that we don't include the mascot and icon files in out build
@@ -128,7 +129,12 @@
# Add support for `open file` dialog
makeWrapper $out/share/${pname}/${pname} $out/bin/${pname} \
--prefix PATH : ${lib.makeBinPath [ zenity ]} \
--prefix LD_LIBRARY_PATH : ${lib.makeLibraryPath [ libGL ]}
--prefix LD_LIBRARY_PATH : ${
lib.makeLibraryPath [
libGL
luajit_2_1
]
}
'';
meta = {

View file

@@ -21,8 +21,8 @@
"sha256": "01dgvlvwbhwz7822gp6z5xn6w3k51q09i6qzns2i4ixmjh45wscs"
},
"diet-ng": {
"version": "1.8.1",
"sha256": "0kh8haw712xkd3f07s5x5g12nmmkv0y1lk2cqh66298fc5mgj4sv"
"version": "1.8.2",
"sha256": "0hwm8dsyw7xb9d540ks314vzxibn4ri7b4m2gagqbpmzspvd2slv"
},
"dportals": {
"version": "0.1.0",
@@ -33,12 +33,12 @@
"sha256": "0p9g4h5qanbg6281x1068mdl5p7zvqig4zmmi72a2cay6dxnbvxb"
},
"eventcore": {
"version": "0.9.30",
"sha256": "1n8wdcjhas0y99pf9fvwwsydkmy9g7gvfjhlwpjh158c7pfjwlaq"
"version": "0.9.34",
"sha256": "0znrcmxdr65gk8bwrknhm530kicznia4xb09h5jv42sxnv3cjkjw"
},
"facetrack-d": {
"version": "0.7.8",
"sha256": "1414wvh0kn1rps5r16ir92sqfj8a7na1gd71ds81jkq8arkm17j0"
"version": "0.8.0",
"sha256": "0p04yd50sgjb9n9gdp2yjgvlm8kkld2gl5ivz36npjnchj8k5a8i"
},
"fghj": {
"version": "1.0.2",
@@ -61,29 +61,21 @@
"sha256": "0dl7n4myxp1s3b32v2s975k76gs90wr2nw6ac5jq9hsgzhp1ix0h"
},
"inmath": {
"version": "1.0.6",
"sha256": "0kzk55ilbnl6qypjk60zwd5ibys5n47128hbbr0mbc7bpj9ppfg4"
"version": "1.3.0",
"sha256": "1bmfsnlpm3lb085cs29h63l4fmfr0xr9iyfd0wrg5i87difshpw6"
},
"inochi2d": {
"version": "0.8.4",
"sha256": "1bj0c6i9kcw1vfm6lf8lyxpf1lhhslg3f182jycdmzms15i3jb3y"
"version": "0.8.6",
"sha256": "0xhidp1y91cidh3g1cc5v7psb5kfy17ars7k7cplnywhjlcqqk70"
},
"inui": {
"version": "1.2.1",
"sha256": "0pygf8jxnbvib5f23qxf6k24wz8mh6fc0zhrkp83gq33k02ab5cx"
},
"libasync": {
"version": "0.8.6",
"sha256": "0hhk5asfdccby8ky77a25qn7dfmfdmwyzkrg3zk064bicmgdwlnj"
"version": "1.2.2",
"sha256": "1gh7ngva2ijz5gx9hrqn9rzzx5vvpf6l12r98wklzxwb9v5hmj69"
},
"lumars": {
"version": "1.6.1",
"sha256": "1vzdghqwv2gb41rp75456g43yfsndbl0dy6bnn4x6azwwny22br9"
},
"memutils": {
"version": "1.0.10",
"sha256": "0hm31birbw59sw1bi9syjhbcdgwwwyyx6r9jg7ar9i6a74cjr52c"
},
"mir-algorithm": {
"version": "3.22.1",
"sha256": "1bvvf3dm26x1h10pg1s4kyhxiyrmd96kk2lmchyady39crpjj5cf"
@@ -96,9 +88,13 @@
"version": "1.0.1",
"sha256": "0adyjpcgd65z44iydnrrrpjwbvmrm08a3pkcriqi7npqylfysqn6"
},
"numem": {
"version": "0.11.3",
"sha256": "00rm3cg5i714ncww8yxsbzf1y1bf6r8d0yx6i38ac2x7090arvjm"
},
"openssl": {
"version": "3.3.3",
"sha256": "1fwhd5fkvgbqf3y8gwmrnd42kzi4k3mibpxijw5j82jxgfp1rzsf"
"version": "3.3.4",
"sha256": "17s71yfyhb9jyym2nldj23ikazwbbrmh6ply33mg888rd6dxnhyy"
},
"openssl-static": {
"version": "1.0.5+3.0.8",
@@ -125,8 +121,8 @@
"sha256": "12mfm49bjnh2pvm51dzna625kzgwznm9kcv6qhazc4il9j0224wd"
},
"vibe-core": {
"version": "2.8.4",
"sha256": "1pik6vympgwxpyxb75g1f8409cd6hw952gbflqvwaj18shz6dwjm"
"version": "2.9.3",
"sha256": "032q1gkm7l6blj5y3yiwk205m12svp4bv8k743crkd8d1xhlrrvi"
},
"vibe-d": {
"version": "0.9.8",

View file

@@ -0,0 +1,98 @@
diff --git a/dub.sdl b/dub.sdl
index 50c0da1..87936a4 100644
--- a/dub.sdl
+++ b/dub.sdl
@@ -32,6 +32,9 @@ configuration "barebones" {
targetType "executable"
dependency "dportals" version="~>0.1.0"
+
+ subConfiguration "lumars" "lua51-dynamic"
+ versions "LUA_51"
}
@@ -51,6 +54,9 @@ configuration "linux-full" {
versions "InBranding"
dependency "dportals" version="~>0.1.0"
+
+ subConfiguration "lumars" "lua51-dynamic"
+ versions "LUA_51"
}
configuration "osx-full" {
@@ -84,6 +90,9 @@ configuration "linux-nightly" {
versions "InNightly"
dependency "dportals" version="~>0.1.0"
+
+ subConfiguration "lumars" "lua51-dynamic"
+ versions "LUA_51"
}
// macOS nightly build
diff --git a/source/session/plugins/package.d b/source/session/plugins/package.d
index 965c64f..7cfbb0b 100644
--- a/source/session/plugins/package.d
+++ b/source/session/plugins/package.d
@@ -14,9 +14,9 @@ import lumars;
import session.log;
import std.file;
import std.path;
+import std.exception;
private {
- bool couldLoadLua = true;
LuaState* state;
LuaTable apiTable;
@@ -34,13 +34,17 @@ Plugin[] insPlugins;
Initializes Lua support
*/
void insLuaInit() {
- // LuaSupport support = loadLua();
-
- // if (support == LuaSupport.noLibrary || support == LuaSupport.badLibrary) {
- // couldLoadLua = false;
- // insLogWarn("Could not load Lua support...");
- // } else insLogInfo("Lua support initialized.");
- insLogInfo("Lua support initialized. (Statically linked for now)");
+ version(linux){
+ LuaSupport support = loadLua("libluajit-5.1.so.2");
+ if(support == LuaSupport.noLibrary){
+ support = loadLua();
+ }
+ enforce(support != LuaSupport.noLibrary, "Could not find Lua support...!");
+ enforce(support != LuaSupport.badLibrary, "Bad Lua library found!");
+ insLogInfo("Lua support initialized.");
+ } else {
+ insLogInfo("Lua support initialized. (Statically linked)");
+ }
// Create Lua state
state = new LuaState(luaL_newstate());
@@ -56,6 +60,9 @@ void insLuaInit() {
void insLuaUnload() {
lua_close(state.handle());
destroy(state);
+ version(linux){
+ unloadLua();
+ }
}
void insSavePluginState() {
@@ -111,13 +118,6 @@ void insEnumeratePlugins() {
insSavePluginState();
}
-/**
- Gets whether Lua support is loaded.
-*/
-bool insHasLua() {
- return couldLoadLua;
-}
-
/**
Gets string of value
*/

View file

@@ -5,13 +5,13 @@
mkDerivation rec {
pname = "klayout";
version = "0.29.6";
version = "0.29.7";
src = fetchFromGitHub {
owner = "KLayout";
repo = "klayout";
rev = "v${version}";
hash = "sha256-gbbes8CPh+Z9wCeQaAaObZjQvBTMe06z8oR12i6e12M=";
hash = "sha256-4GjCV/Z9al7Hrj7Ik/EvmLy5jPCsU/3Ti9HwOjzPKYc=";
};
postPatch = ''

View file

@@ -14,13 +14,13 @@
python310Packages.buildPythonApplication rec {
pname = "nwg-displays";
version = "0.3.21";
version = "0.3.22";
src = fetchFromGitHub {
owner = "nwg-piotr";
repo = "nwg-displays";
rev = "refs/tags/v${version}";
hash = "sha256-aVQSWvQTRdz5R9uEXU4CvveRaPdehcL7hrXwFoPCEyI=";
hash = "sha256-lTFei4NR8eu5/5V9MEc/k6qQYRRZkQ5m6B7Bx9xIS6c=";
};
nativeBuildInputs = [

Some files were not shown because too many files have changed in this diff