{ config, lib, options, pkgs, utils, ... }:
#
# TODO: zfs tunables

with utils;
with lib;

let

  cfgZfs = config.boot.zfs;
  optZfs = options.boot.zfs;
  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
  cfgSnapshots = config.services.zfs.autoSnapshot;
  cfgSnapFlags = cfgSnapshots.flags;
  cfgScrub = config.services.zfs.autoScrub;
  cfgTrim = config.services.zfs.trim;
  cfgZED = config.services.zfs.zed;

  selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
  clevisDatasets = map (e: e.device) (filter (e: e.device != null && (hasAttr e.device config.boot.initrd.clevis.devices) && e.fsType == "zfs" && (fsNeededForBoot e)) config.system.build.fileSystems);

  inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
  inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;

  autosnapPkg = pkgs.zfstools.override {
    zfs = cfgZfs.package;
  };

  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";

  datasetToPool = x: elemAt (splitString "/" x) 0;

  fsToPool = fs: datasetToPool fs.device;
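
  # For example, datasetToPool "rpool/safe/root" == "rpool", so a fileSystems
  # entry with device = "rpool/safe/root" (a hypothetical dataset) maps to the
  # pool "rpool".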

  zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;

  allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);

  rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));

  dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);
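
  # A hypothetical layout to illustrate the three sets above: with "/" on
  # "rpool/root" and extraPools = [ "tank" ], allPools == [ "rpool" "tank" ],
  # rootPools == [ "rpool" ] (imported in stage 1) and dataPools == [ "tank" ]
  # (imported by the systemd services defined below).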

  snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];

  # When importing ZFS pools, there's one difficulty: These scripts may run
  # before the backing devices (physical HDDs, etc.) of the pool have been
  # scanned and initialized.
  #
  # An attempted import with all devices missing will just fail, and can be
  # retried, but an import where e.g. two out of three disks in a three-way
  # mirror are missing, will succeed. This is a problem: When the missing disks
  # are later discovered, they won't be automatically set online, rendering the
  # pool redundancy-less (and far slower) until such time as the system reboots.
  #
  # The solution is below: poolReady checks the status of a not-yet-imported
  # pool, to see if *every* device is available -- in which case the pool will be
  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
  #
  # The import scripts then loop over this, waiting until the pool is ready or a
  # sufficient amount of time has passed that we can assume it won't be. In the
  # latter case it makes one last attempt at importing, allowing the system to
  # (eventually) boot even with a degraded pool.
  importLib = { zpoolCmd, awkCmd, cfgZfs }: ''
    for o in $(cat /proc/cmdline); do
      case $o in
        zfs_force|zfs_force=1|zfs_force=y)
          ZFS_FORCE="-f"
          ;;
      esac
    done

    poolReady() {
      pool="$1"
      state="$("${zpoolCmd}" import -d "${cfgZfs.devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
      if [[ "$state" = "ONLINE" ]]; then
        return 0
      else
        echo "Pool $pool in state $state, waiting"
        return 1
      fi
    }

    poolImported() {
      pool="$1"
      "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
    }

    poolImport() {
      pool="$1"
      "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
    }
  '';
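
  # importLib is spliced into both the stage-1 script and the per-pool import
  # services below; a minimal instantiation, matching the call sites later in
  # this file, looks like:
  #
  #   importLib {
  #     zpoolCmd = "${cfgZfs.package}/sbin/zpool";
  #     awkCmd = "${pkgs.gawk}/bin/awk";
  #     inherit cfgZfs;
  #   }
  #
  # and evaluates to shell text defining poolReady, poolImported and poolImport.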

  getPoolFilesystems = pool:
    filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

  getPoolMounts = prefix: pool:
    let
      poolFSes = getPoolFilesystems pool;

      # Remove the "/" suffix because even though most mountpoints
      # won't have it, the "/" mountpoint will, and we can't have the
      # trailing slash in "/sysroot/" in stage 1.
      mountPoint = fs: escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));

      hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
    in
      map (x: "${mountPoint x}.mount") poolFSes
      ++ lib.optional hasUsr "sysusr-usr.mount";
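
  # For example (hypothetical pool), getPoolMounts "/sysroot" "rpool" with the
  # pool's only filesystem mounted at "/" yields [ "sysroot.mount" ]; a
  # filesystem at "/usr" additionally pulls in "sysusr-usr.mount".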

  getKeyLocations = pool: if isBool cfgZfs.requestEncryptionCredentials then {
    hasKeys = cfgZfs.requestEncryptionCredentials;
    command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
  } else let
    keys = filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
  in {
    hasKeys = keys != [];
    command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
  };
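
  # The generated command prints one tab-separated "name keylocation keystatus"
  # line per dataset, e.g. "rpool/safe<TAB>prompt<TAB>unavailable" (hypothetical
  # dataset), which the import service below consumes with `read ds kl ks`.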

  createImportService = { pool, systemd, force, prefix ? "" }:
    nameValuePair "zfs-import-${pool}" {
      description = "Import ZFS pool \"${pool}\"";
      # We wait for systemd-udev-settle to ensure devices are available,
      # but don't *require* it, because mounts shouldn't be killed if it's stopped.
      # In the future, hopefully someone will complete this:
      # https://github.com/zfsonlinux/zfs/pull/4943
      wants = [ "systemd-udev-settle.service" ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
      after = [
        "systemd-udev-settle.service"
        "systemd-modules-load.service"
        "systemd-ask-password-console.service"
      ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
      requiredBy = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
      before = getPoolMounts prefix pool ++ [ "shutdown.target" "zfs-import.target" ];
      conflicts = [ "shutdown.target" ];
      unitConfig = {
        DefaultDependencies = "no";
      };
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      environment.ZFS_FORCE = optionalString force "-f";
      script = let
        keyLocations = getKeyLocations pool;
      in (importLib {
        # See comments at importLib definition.
        zpoolCmd = "${cfgZfs.package}/sbin/zpool";
        awkCmd = "${pkgs.gawk}/bin/awk";
        inherit cfgZfs;
      }) + ''
        if ! poolImported "${pool}"; then
          echo -n "importing ZFS pool \"${pool}\"..."
          # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
          for trial in `seq 1 60`; do
            poolReady "${pool}" && poolImport "${pool}" && break
            sleep 1
          done
          poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
        fi

        if poolImported "${pool}"; then
          ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}

          ${optionalString keyLocations.hasKeys ''
            ${keyLocations.command} | while IFS=$'\t' read ds kl ks; do
              {
              if [[ "$ks" != unavailable ]]; then
                continue
              fi
              case "$kl" in
                none )
                  ;;
                prompt )
                  tries=3
                  success=false
                  while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
                    ${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
                      && success=true \
                      || tries=$((tries - 1))
                  done
                  [[ $success = true ]]
                  ;;
                * )
                  ${cfgZfs.package}/sbin/zfs load-key "$ds"
                  ;;
              esac
              } < /dev/null # To protect while read ds kl in case anything reads stdin
            done
          ''}

          echo "Successfully imported ${pool}"
        else
          exit 1
        fi
      '';
    };
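
  # A sketch of a call to this helper (the pool name "tank" is hypothetical;
  # see the actual call sites below):
  #
  #   createImportService {
  #     pool = "tank";
  #     systemd = config.systemd.package;
  #     force = cfgZfs.forceImportAll;
  #   }
  #
  # returns a nameValuePair for systemd.services."zfs-import-tank".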

  zedConf = generators.toKeyValue {
    mkKeyValue = generators.mkKeyValueDefault {
      mkValueString = v:
        if isInt v then toString v
        else if isString v then "\"${v}\""
        else if true == v then "1"
        else if false == v then "0"
        else if isList v then "\"" + (concatStringsSep " " v) + "\""
        else err "this value is" (toString v);
    } "=";
  } cfgZED.settings;
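
  # For example, hypothetical settings { ZED_EMAIL_ADDR = [ "root" ];
  # ZED_NOTIFY_INTERVAL_SECS = 3600; ZED_NOTIFY_VERBOSE = false; }
  # render to zed.rc as:
  #
  #   ZED_EMAIL_ADDR="root"
  #   ZED_NOTIFY_INTERVAL_SECS=3600
  #   ZED_NOTIFY_VERBOSE=0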

in

{
  imports = [
    (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
  ];

  ###### interface

  options = {
    boot.zfs = {
      package = mkOption {
        type = types.package;
        default = if cfgZfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs;
        defaultText = literalExpression "if zfsUnstable is enabled then pkgs.zfsUnstable else pkgs.zfs";
        description = lib.mdDoc "Configured ZFS userland tools package, use `pkgs.zfsUnstable` if you want to track the latest staging ZFS branch.";
      };

      modulePackage = mkOption {
        internal = true; # It is supposed to be selected automatically, but can be overridden by expert users.
        default = selectModulePackage cfgZfs.package;
        type = types.package;
        description = lib.mdDoc "Configured ZFS kernel module package.";
      };

      enabled = mkOption {
        readOnly = true;
        type = types.bool;
        default = inInitrd || inSystem;
        defaultText = literalMD "`true` if ZFS filesystem support is enabled";
        description = lib.mdDoc "True if ZFS filesystem support is enabled";
      };

      enableUnstable = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Use the unstable zfs package. This might be an option if the latest
          kernel is not yet supported by a published release of ZFS. Enabling
          this option will install a development version of ZFS on Linux. The
          version will have already passed an extensive test suite, but it is
          more likely to hit an undiscovered bug compared to running a released
          version of ZFS on Linux.
        '';
      };

      allowHibernation = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Allow hibernation support. This may be an unsafe option depending on your
          setup; make sure NOT to use swap on ZFS.
        '';
      };

      extraPools = mkOption {
        type = types.listOf types.str;
        default = [];
        example = [ "tank" "data" ];
        description = lib.mdDoc ''
          Name or GUID of extra ZFS pools that you wish to import during boot.

          Usually this is not necessary. Instead, you should set the mountpoint property
          of ZFS filesystems to `legacy` and add the ZFS filesystems to
          NixOS's {option}`fileSystems` option, which makes NixOS automatically
          import the associated pool.

          However, in some cases (e.g. if you have many filesystems) it may be preferable
          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
          will not be managing those filesystems, you will need to specify the ZFS pool here
          so that NixOS automatically imports it on every boot.
        '';
      };

      devNodes = mkOption {
        type = types.path;
        default = "/dev/disk/by-id";
        description = lib.mdDoc ''
          Name of directory from which to import ZFS devices.

          This should be a path under /dev containing stable names for all devices needed, as
          import may fail if device nodes are renamed concurrently with a device failing.
        '';
      };

      forceImportRoot = mkOption {
        type = types.bool;
        default = true;
        description = lib.mdDoc ''
          Forcibly import the ZFS root pool(s) during early boot.

          This is enabled by default for backwards compatibility purposes, but it is highly
          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
          to protect your ZFS pools.

          If you set this option to `false` and NixOS subsequently fails to
          boot because it cannot import the root pool, you should boot with the
          `zfs_force=1` option as a kernel parameter (e.g. by manually
          editing the kernel params in grub during boot). You should only need to do this
          once.
        '';
      };

      forceImportAll = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Forcibly import all ZFS pool(s).

          If you set this option to `false` and NixOS subsequently fails to
          import your non-root ZFS pool(s), you should manually import each pool with
          "zpool import -f \<pool-name\>", and then reboot. You should only need to do
          this once.
        '';
      };

      requestEncryptionCredentials = mkOption {
        type = types.either types.bool (types.listOf types.str);
        default = true;
        example = [ "tank" "data" ];
        description = lib.mdDoc ''
          If true, encryption keys or passwords for all encrypted datasets
          are requested on import. To only decrypt selected datasets, supply
          a list of dataset names instead. For root pools, the encryption key can be
          supplied via both an interactive prompt (keylocation=prompt)
          and from a file (keylocation=file://).
        '';
      };

      passwordTimeout = mkOption {
        type = types.int;
        default = 0;
        description = lib.mdDoc ''
          Timeout in seconds to wait for password entry for decrypt at boot.

          Defaults to 0, which waits forever.
        '';
      };

      removeLinuxDRM = lib.mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Linux 6.2 dropped some kernel symbols required by zfs on aarch64.
          Enabling this option will bring them back to allow this kernel version.
          Note that in some jurisdictions this may be illegal as it might be considered
          removing copyright protection from the code.
          See https://www.ifross.org/?q=en/artikel/ongoing-dispute-over-value-exportsymbolgpl-function for further information.

          If you configure your kernel package with `zfs.latestCompatibleLinuxPackages`,
          you will need to also pass removeLinuxDRM to that package like this:

          ```
          { pkgs, ... }: {
            boot.kernelPackages = (pkgs.zfs.override {
              removeLinuxDRM = pkgs.hostPlatform.isAarch64;
            }).latestCompatibleLinuxPackages;

            boot.zfs.removeLinuxDRM = true;
          }
          ```
        '';
      };
    };

    services.zfs.autoSnapshot = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = lib.mdDoc ''
          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
          Note that you must set the `com.sun:auto-snapshot`
          property to `true` on all datasets which you wish
          to auto-snapshot.

          You can override a child dataset to use, or not use auto-snapshotting
          by setting its flag with the given interval:
          `zfs set com.sun:auto-snapshot:weekly=false DATASET`
        '';
      };

      flags = mkOption {
        default = "-k -p";
        example = "-k -p --utc";
        type = types.str;
        description = lib.mdDoc ''
          Flags to pass to the zfs-auto-snapshot command.

          Run `zfs-auto-snapshot` (without any arguments) to
          see available flags.

          If it's not too inconvenient for snapshots to have timestamps in UTC,
          it is suggested that you append `--utc` to the list
          of default options (see example).

          Otherwise, snapshot names can cause name conflicts or apparent time
          reversals due to daylight savings, timezone or other date/time changes.
        '';
      };

      frequent = mkOption {
        default = 4;
        type = types.int;
        description = lib.mdDoc ''
          Number of frequent (15-minute) auto-snapshots that you wish to keep.
        '';
      };

      hourly = mkOption {
        default = 24;
        type = types.int;
        description = lib.mdDoc ''
          Number of hourly auto-snapshots that you wish to keep.
        '';
      };

      daily = mkOption {
        default = 7;
        type = types.int;
        description = lib.mdDoc ''
          Number of daily auto-snapshots that you wish to keep.
        '';
      };

      weekly = mkOption {
        default = 4;
        type = types.int;
        description = lib.mdDoc ''
          Number of weekly auto-snapshots that you wish to keep.
        '';
      };

      monthly = mkOption {
        default = 12;
        type = types.int;
        description = lib.mdDoc ''
          Number of monthly auto-snapshots that you wish to keep.
        '';
      };
    };

    services.zfs.trim = {
      enable = mkOption {
        description = lib.mdDoc "Whether to enable periodic TRIM on all ZFS pools.";
        default = true;
        example = false;
        type = types.bool;
      };

      interval = mkOption {
        default = "weekly";
        type = types.str;
        example = "daily";
        description = lib.mdDoc ''
          How often we run trim. For most desktop and server systems
          a sufficient trimming frequency is once a week.

          The format is described in
          {manpage}`systemd.time(7)`.
        '';
      };
    };

    services.zfs.autoScrub = {
      enable = mkEnableOption (lib.mdDoc "periodic scrubbing of ZFS pools");

      interval = mkOption {
        default = "Sun, 02:00";
        type = types.str;
        example = "daily";
        description = lib.mdDoc ''
          Systemd calendar expression when to scrub ZFS pools. See
          {manpage}`systemd.time(7)`.
        '';
      };

      pools = mkOption {
        default = [];
        type = types.listOf types.str;
        example = [ "tank" ];
        description = lib.mdDoc ''
          List of ZFS pools to periodically scrub. If empty, all pools
          will be scrubbed.
        '';
      };
    };

    services.zfs.expandOnBoot = mkOption {
      type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
      default = "disabled";
      example = [ "tank" "dozer" ];
      description = lib.mdDoc ''
        After importing, expand each device in the specified pools.

        Set the value to the plain string "all" to expand all pools on boot:

        ```
        services.zfs.expandOnBoot = "all";
        ```

        or set the value to a list of pools to expand the disks of specific pools:

        ```
        services.zfs.expandOnBoot = [ "tank" "dozer" ];
        ```
      '';
    };

    services.zfs.zed = {
      enableMail = mkOption {
        type = types.bool;
        default = config.services.mail.sendmailSetuidWrapper != null;
        defaultText = literalExpression ''
          config.services.mail.sendmailSetuidWrapper != null
        '';
        description = mdDoc ''
          Whether to enable ZED's ability to send emails.
        '';
      };

      settings = mkOption {
        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
        example = literalExpression ''
          {
            ZED_DEBUG_LOG = "/tmp/zed.debug.log";

            ZED_EMAIL_ADDR = [ "root" ];
            ZED_EMAIL_PROG = "mail";
            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";

            ZED_NOTIFY_INTERVAL_SECS = 3600;
            ZED_NOTIFY_VERBOSE = false;

            ZED_USE_ENCLOSURE_LEDS = true;
            ZED_SCRUB_AFTER_RESILVER = false;
          }
        '';
        description = lib.mdDoc ''
          ZFS Event Daemon /etc/zfs/zed.d/zed.rc content.

          See {manpage}`zed(8)` for details on ZED and the scripts in
          /etc/zfs/zed.d to find the possible variables.
        '';
      };
    };
  };

  ###### implementation

  config = mkMerge [
    (mkIf cfgZfs.enabled {
      assertions = [
        {
          assertion = cfgZfs.modulePackage.version == cfgZfs.package.version;
          message = "The kernel module and the userspace tooling versions are not matching, this is an unsupported usecase.";
        }
        {
          assertion = config.networking.hostId != null;
          message = "ZFS requires networking.hostId to be set";
        }
        {
          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
        }
        {
          assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
          message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
        }
        {
          assertion = !(elem "" allPools);
          message = ''
            Automatic pool detection found an empty pool name, which can't be used.
            Hint: for `fileSystems` entries with `fsType = zfs`, the `device` attribute
            should be a zfs dataset name, like `device = "pool/data/set"`.
            This error can be triggered by using an absolute path, such as `"/dev/disk/..."`.
          '';
        }
      ];

      boot = {
        kernelModules = [ "zfs" ];

        # https://github.com/openzfs/zfs/issues/260
        # https://github.com/openzfs/zfs/issues/12842
        # https://github.com/NixOS/nixpkgs/issues/106093
        kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];

        extraModulePackages = [
          (cfgZfs.modulePackage.override { inherit (cfgZfs) removeLinuxDRM; })
        ];
      };

      boot.initrd = mkIf inInitrd {
        # spl has been removed in ≥ 2.2.0.
        kernelModules = [ "zfs" ] ++ lib.optional (lib.versionOlder version "2.2.0") "spl";

        extraUtilsCommands =
          mkIf (!config.boot.initrd.systemd.enable) ''
            copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
            copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
            copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
          '';

        extraUtilsCommandsTest =
          mkIf (!config.boot.initrd.systemd.enable) ''
            $out/bin/zfs --help >/dev/null 2>&1
            $out/bin/zpool --help >/dev/null 2>&1
          '';

        postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable) (concatStringsSep "\n" ([''
            ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
          ''] ++ [(importLib {
            # See comments at importLib definition.
            zpoolCmd = "zpool";
            awkCmd = "awk";
            inherit cfgZfs;
          })] ++ (map (pool: ''
            echo -n "importing root ZFS pool \"${pool}\"..."
            # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
            if ! poolImported "${pool}"; then
              for trial in `seq 1 60`; do
                poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
                sleep 1
                echo -n .
              done
              echo
              if [[ -n "$msg" ]]; then
                echo "$msg";
              fi
              poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
            fi

            ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}

            ${if isBool cfgZfs.requestEncryptionCredentials
              then optionalString cfgZfs.requestEncryptionCredentials ''
                zfs load-key -a
              ''
              else concatMapStrings (fs: ''
                zfs load-key -- ${escapeShellArg fs}
              '') (filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}
          '') rootPools)));

        # Systemd in stage 1
        systemd = mkIf config.boot.initrd.systemd.enable {
          packages = [ cfgZfs.package ];
          services = listToAttrs (map (pool: createImportService {
            inherit pool;
            systemd = config.boot.initrd.systemd.package;
            force = cfgZfs.forceImportRoot;
            prefix = "/sysroot";
          }) rootPools);
          targets.zfs-import.wantedBy = [ "zfs.target" ];
          targets.zfs.wantedBy = [ "initrd.target" ];
          extraBin = {
            zpool = "${cfgZfs.package}/sbin/zpool";
            zfs = "${cfgZfs.package}/sbin/zfs";
            awk = "${pkgs.gawk}/bin/awk";
          };
        };
      };

      systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
        exec ${cfgZfs.package}/bin/zpool sync
      '';
      systemd.shutdownRamfs.storePaths = [ "${cfgZfs.package}/bin/zpool" ];

      # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To avoid breaking
      # people's bootloaders (and since probably not everybody reads release notes that thoroughly), we also
      # check inSystem here.
      boot.loader.grub = mkIf (inInitrd || inSystem) {
        zfsSupport = true;
        zfsPackage = cfgZfs.package;
      };

      services.zfs.zed.settings = {
        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault (
          config.security.wrapperDir + "/" +
          config.services.mail.sendmailSetuidWrapper.program
        ));
        # subject in header for sendmail
        ZED_EMAIL_OPTS = mkIf cfgZED.enableMail (mkDefault "@ADDRESS@");

        PATH = lib.makeBinPath [
          cfgZfs.package
          pkgs.coreutils
          pkgs.curl
          pkgs.gawk
          pkgs.gnugrep
          pkgs.gnused
          pkgs.nettools
          pkgs.util-linux
        ];
      };

      # ZFS already has its own scheduler. Without this, my (@Artturin) computer
      # froze for a second whenever I built something with nix.
      services.udev.extraRules = ''
        ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
      '';

      environment.etc = genAttrs
        (map
          (file: "zfs/zed.d/${file}")
          [
            "all-syslog.sh"
            "pool_import-led.sh"
            "resilver_finish-start-scrub.sh"
            "statechange-led.sh"
            "vdev_attach-led.sh"
            "zed-functions.sh"
            "data-notify.sh"
            "resilver_finish-notify.sh"
            "scrub_finish-notify.sh"
            "statechange-notify.sh"
            "vdev_clear-led.sh"
          ]
        )
        (file: { source = "${cfgZfs.package}/etc/${file}"; })
      // {
        "zfs/zed.d/zed.rc".text = zedConf;
        "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
      };

      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
      environment.systemPackages = [ cfgZfs.package ]
        ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
      systemd.packages = [ cfgZfs.package ];

      # Export kernel_neon_* symbols again.
      # This change is necessary until ZFS figures out a solution
      # with upstream or in their build system to fill the gap for
      # this symbol.
      # In the meantime, we restore what was once a working piece of code
      # in the kernel.
      boot.kernelPatches = lib.optional (cfgZfs.removeLinuxDRM && pkgs.stdenv.hostPlatform.system == "aarch64-linux") {
        name = "export-neon-symbols-as-gpl";
        patch = pkgs.fetchpatch {
          url = "https://github.com/torvalds/linux/commit/aaeca98456431a8d9382ecf48ac4843e252c07b3.patch";
          hash = "sha256-L2g4G1tlWPIi/QRckMuHDcdWBcKpObSWSRTvbHRIwIk=";
          revert = true;
        };
      };

      systemd.services = let
        createImportService' = pool: createImportService {
          inherit pool;
          systemd = config.systemd.package;
          force = cfgZfs.forceImportAll;
        };

        # This forces a sync of any ZFS pools prior to poweroff, even if they're set
        # to sync=disabled.
        createSyncService = pool:
          nameValuePair "zfs-sync-${pool}" {
            description = "Sync ZFS pool \"${pool}\"";
            wantedBy = [ "shutdown.target" ];
            unitConfig = {
              DefaultDependencies = false;
            };
            serviceConfig = {
              Type = "oneshot";
              RemainAfterExit = true;
            };
            script = ''
              ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
            '';
          };

        createZfsService = serv:
          nameValuePair serv {
            after = [ "systemd-modules-load.service" ];
            wantedBy = [ "zfs.target" ];
          };

      in listToAttrs (map createImportService' dataPools ++
                      map createSyncService allPools ++
                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);

      systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];

      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
    })

    (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
      systemd.services."zpool-expand@" = {
        description = "Expand ZFS pools";
        after = [ "zfs.target" ];

        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
        };

        scriptArgs = "%i";
        path = [ cfgZfs.package ];

        script = ''
          pool=$1

          echo "Expanding all devices for $pool."

          ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
        '';
      };
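
      # The template instance name carries the pool, so `systemctl start
      # zpool-expand@tank` (with a hypothetical pool "tank") expands every
      # device in that pool; zpool-expand-pools below starts one instance
      # per pool.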
      systemd.services."zpool-expand-pools" =
        let
          # Create a string, to be interpolated in a bash script
          # which enumerates all of the pools to expand.
          # If the option is set to "all", we want to dynamically
          # expand every pool. Otherwise we want to enumerate
          # just the specifically provided list of pools.
          poolListProvider = if cfgExpandOnBoot == "all"
            then "$(zpool list -H -o name)"
            else lib.escapeShellArgs cfgExpandOnBoot;
        in
        {
          description = "Expand specified ZFS pools";
          wantedBy = [ "default.target" ];
          after = [ "zfs.target" ];

          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
          };

          path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];

          script = ''
            for pool in ${poolListProvider}; do
              systemctl start --no-block "zpool-expand@$pool"
            done
          '';
        };
    })

    (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
      systemd.services = let
        descr = name: if name == "frequent" then "15 mins"
                 else if name == "hourly" then "hour"
                 else if name == "daily" then "day"
                 else if name == "weekly" then "week"
                 else if name == "monthly" then "month"
                 else throw "unknown snapshot name";
        numSnapshots = name: builtins.getAttr name cfgSnapshots;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            description = "ZFS auto-snapshotting every ${descr snapName}";
            after = [ "zfs-import.target" ];
            serviceConfig = {
              Type = "oneshot";
              ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
            };
            restartIfChanged = false;
          };
        }) snapshotNames);
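
      # For example, snapName = "frequent" with the default flags and count
      # yields "zfs-snapshot-frequent.service" running
      # "zfs-auto-snapshot -k -p frequent 4", triggered by the matching timer
      # defined below.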

      systemd.timers = let
        timer = name: if name == "frequent" then "*:0,15,30,45" else name;
      in builtins.listToAttrs (map (snapName:
        {
          name = "zfs-snapshot-${snapName}";
          value = {
            wantedBy = [ "timers.target" ];
            timerConfig = {
              OnCalendar = timer snapName;
              Persistent = "yes";
            };
          };
        }) snapshotNames);
    })

    (mkIf (cfgZfs.enabled && cfgScrub.enable) {
      systemd.services.zfs-scrub = {
        description = "ZFS pools scrubbing";
        after = [ "zfs-import.target" ];
        serviceConfig = {
          Type = "simple";
        };
        script = ''
          ${cfgZfs.package}/bin/zpool scrub -w ${
            if cfgScrub.pools != [] then
              (concatStringsSep " " cfgScrub.pools)
            else
              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
          }
        '';
      };
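
      # With cfgScrub.pools = [ "tank" ] (hypothetical), the script runs
      # "zpool scrub -w tank"; with the default empty list it scrubs every
      # imported pool reported by "zpool list".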

      systemd.timers.zfs-scrub = {
        wantedBy = [ "timers.target" ];
        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
        timerConfig = {
          OnCalendar = cfgScrub.interval;
          Persistent = "yes";
        };
      };
    })

    (mkIf (cfgZfs.enabled && cfgTrim.enable) {
      systemd.services.zpool-trim = {
        description = "ZFS pools trim";
        after = [ "zfs-import.target" ];
        path = [ cfgZfs.package ];
        startAt = cfgTrim.interval;
        # By default we ignore errors returned by the trim command, in case:
        # - HDDs are mixed with SSDs
        # - There is an SSD in a pool that is currently being trimmed.
        # - There are only HDDs and we would set the system in a degraded state
        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true'";
      };

      systemd.timers.zpool-trim.timerConfig.Persistent = "yes";
    })
  ];
}