1 { config, lib, pkgs, utils, ... }:
7 cfgZfs = config.boot.zfs;
8 cfgExpandOnBoot = config.services.zfs.expandOnBoot;
9 cfgSnapshots = config.services.zfs.autoSnapshot;
10 cfgSnapFlags = cfgSnapshots.flags;
11 cfgScrub = config.services.zfs.autoScrub;
12 cfgTrim = config.services.zfs.trim;
13 cfgZED = config.services.zfs.zed;
15 selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
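# Clevis "devices" that are in fact ZFS datasets holding (or containing) a
# filesystem needed for boot; their keys get unlocked via clevis in the initrd.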
16 clevisDatasets = lib.attrNames (lib.filterAttrs (device: _: lib.any (e: e.fsType == "zfs" && (utils.fsNeededForBoot e) && (e.device == device || lib.hasPrefix "${device}/" e.device)) config.system.build.fileSystems) config.boot.initrd.clevis.devices);
18 inInitrd = config.boot.initrd.supportedFilesystems.zfs or false;
19 inSystem = config.boot.supportedFilesystems.zfs or false;
21 autosnapPkg = pkgs.zfstools.override {
25 zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";
27 datasetToPool = x: lib.elemAt (lib.splitString "/" x) 0;
29 fsToPool = fs: datasetToPool fs.device;
31 zfsFilesystems = lib.filter (x: x.fsType == "zfs") config.system.build.fileSystems;
33 allPools = lib.unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);
35 rootPools = lib.unique (map fsToPool (lib.filter utils.fsNeededForBoot zfsFilesystems));
37 dataPools = lib.unique (lib.filter (pool: !(lib.elem pool rootPools)) allPools);
39 snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];
# When importing ZFS pools, there's one difficulty: These scripts may run
# before the backing devices (physical HDDs, etc.) of the pool have been
# scanned and initialized.
#
# An attempted import with all devices missing will just fail and can be
# retried, but an import where e.g. two out of three disks in a three-way
# mirror are missing will succeed. This is a problem: when the missing disks
# are later discovered, they won't be automatically set online, leaving the
# pool without redundancy (and far slower) until the system reboots.
#
# The solution is below. poolReady checks the status of an un-imported
# pool to see if *every* device is available -- in which case the pool will be
# in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
#
# The import scripts then loop over this, waiting until the pool is ready or a
# sufficient amount of time has passed that we can assume it won't be. In the
# latter case they make one last attempt at importing, allowing the system to
# (eventually) boot even with a degraded pool.
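# As a rough sketch (the helpers themselves are defined in importLib below),
# the functions used by the import loops behave like:
#
#   poolReady    <pool>  - parse `zpool import -d <devNodes>` output and
#                          succeed iff the not-yet-imported pool is ONLINE
#   poolImported <pool>  - succeed iff `zpool list <pool>` succeeds
#   poolImport   <pool>  - run `zpool import -d <devNodes> -N $ZFS_FORCE <pool>`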
59 importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
60 for o in $(cat /proc/cmdline); do
62 zfs_force|zfs_force=1|zfs_force=y)
69 state="$("${zpoolCmd}" import -d "${cfgZfs.devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
70 if [[ "$state" = "ONLINE" ]]; then
73 echo "Pool $pool in state $state, waiting"
79 "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
83 "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
87 getPoolFilesystems = pool:
88 lib.filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;
90 getPoolMounts = prefix: pool:
92 poolFSes = getPoolFilesystems pool;
94 # Remove the "/" suffix because even though most mountpoints
95 # won't have it, the "/" mountpoint will, and we can't have the
96 # trailing slash in "/sysroot/" in stage 1.
97 mountPoint = fs: utils.escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));
99 hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
101 map (x: "${mountPoint x}.mount") poolFSes
102 ++ lib.optional hasUsr "sysusr-usr.mount";
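# Decide whether encryption keys must be requested for a pool, and build the
# `zfs list` command that enumerates the datasets (with keylocation and
# keystatus) to unlock.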
104 getKeyLocations = pool: if lib.isBool cfgZfs.requestEncryptionCredentials then {
105 hasKeys = cfgZfs.requestEncryptionCredentials;
106 command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
108 keys = lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
110 hasKeys = keys != [];
111 command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
114 createImportService = { pool, systemd, force, prefix ? "" }:
115 lib.nameValuePair "zfs-import-${pool}" {
116 description = "Import ZFS pool \"${pool}\"";
117 # We wait for systemd-udev-settle to ensure devices are available,
118 # but don't *require* it, because mounts shouldn't be killed if it's stopped.
119 # In the future, hopefully someone will complete this:
120 # https://github.com/zfsonlinux/zfs/pull/4943
121 wants = [ "systemd-udev-settle.service" ] ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
123 "systemd-udev-settle.service"
124 "systemd-modules-load.service"
125 "systemd-ask-password-console.service"
126 ] ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
127 requiredBy = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
128 before = getPoolMounts prefix pool ++ [ "shutdown.target" "zfs-import.target" ];
129 conflicts = [ "shutdown.target" ];
131 DefaultDependencies = "no";
135 RemainAfterExit = true;
137 environment.ZFS_FORCE = lib.optionalString force "-f";
139 keyLocations = getKeyLocations pool;
141 # See comments at importLib definition.
142 zpoolCmd = "${cfgZfs.package}/sbin/zpool";
143 awkCmd = "${pkgs.gawk}/bin/awk";
146 if ! poolImported "${pool}"; then
147 echo -n "importing ZFS pool \"${pool}\"..."
# Retry the import until it succeeds, because the devices it needs may not have been discovered yet.
149 for trial in `seq 1 60`; do
150 poolReady "${pool}" && poolImport "${pool}" && break
153 poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
155 if poolImported "${pool}"; then
156 ${lib.optionalString config.boot.initrd.clevis.enable (lib.concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true ") (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets))}
159 ${lib.optionalString keyLocations.hasKeys ''
160 ${keyLocations.command} | while IFS=$'\t' read ds kl ks; do
162 if [[ "$ks" != unavailable ]]; then
171 while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
172 ${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
174 || tries=$((tries - 1))
176 [[ $success = true ]]
179 ${cfgZfs.package}/sbin/zfs load-key "$ds"
182 } < /dev/null # To protect while read ds kl in case anything reads stdin
185 echo "Successfully imported ${pool}"
192 zedConf = lib.generators.toKeyValue {
193 mkKeyValue = lib.generators.mkKeyValueDefault {
195 if lib.isInt v then toString v
196 else if lib.isString v then "\"${v}\""
197 else if true == v then "1"
198 else if false == v then "0"
199 else if lib.isList v then "\"" + (lib.concatStringsSep " " v) + "\""
200 else lib.err "this value is" (toString v);
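# For illustration (assuming the usual "=" key/value separator), settings such
# as the following from the `settings` option's example:
#
#   ZED_EMAIL_ADDR = [ "root" ];
#   ZED_NOTIFY_INTERVAL_SECS = 3600;
#   ZED_NOTIFY_VERBOSE = false;
#
# would be rendered into zed.rc roughly as:
#
#   ZED_EMAIL_ADDR="root"
#   ZED_NOTIFY_INTERVAL_SECS=3600
#   ZED_NOTIFY_VERBOSE=0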
208 (lib.mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
209 (lib.mkRemovedOptionModule [ "boot" "zfs" "enableUnstable" ] "Instead set `boot.zfs.package = pkgs.zfs_unstable;`")
216 package = lib.mkOption {
217 type = lib.types.package;
219 defaultText = lib.literalExpression "pkgs.zfs";
220 description = "Configured ZFS userland tools package, use `pkgs.zfs_unstable` if you want to track the latest staging ZFS branch.";
223 modulePackage = lib.mkOption {
224 internal = true; # It is supposed to be selected automatically, but can be overridden by expert users.
225 default = selectModulePackage cfgZfs.package;
226 type = lib.types.package;
227 description = "Configured ZFS kernel module package.";
230 enabled = lib.mkOption {
232 type = lib.types.bool;
233 default = inInitrd || inSystem;
234 defaultText = lib.literalMD "`true` if ZFS filesystem support is enabled";
235 description = "True if ZFS filesystem support is enabled";
238 allowHibernation = lib.mkOption {
239 type = lib.types.bool;
Allow hibernation support. This may be an unsafe option depending on your
setup; make sure NOT to use swap on ZFS.
247 extraPools = lib.mkOption {
248 type = lib.types.listOf lib.types.str;
250 example = [ "tank" "data" ];
252 Name or GUID of extra ZFS pools that you wish to import during boot.
254 Usually this is not necessary. Instead, you should set the mountpoint property
255 of ZFS filesystems to `legacy` and add the ZFS filesystems to
256 NixOS's {option}`fileSystems` option, which makes NixOS automatically
257 import the associated pool.
259 However, in some cases (e.g. if you have many filesystems) it may be preferable
260 to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
261 will not be managing those filesystems, you will need to specify the ZFS pool here
262 so that NixOS automatically imports it on every boot.
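For example, if the pool `tank` is managed entirely with `zfs` commands
(the pool name is illustrative):

    boot.zfs.extraPools = [ "tank" ];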
266 devNodes = lib.mkOption {
267 type = lib.types.path;
268 default = "/dev/disk/by-id";
270 Name of directory from which to import ZFS devices.
272 This should be a path under /dev containing stable names for all devices needed, as
273 import may fail if device nodes are renamed concurrently with a device failing.
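For example, if your pool was created on partitions rather than on whole
disks, a setting along these lines may be appropriate (illustrative):

    boot.zfs.devNodes = "/dev/disk/by-partuuid";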
277 forceImportRoot = lib.mkOption {
278 type = lib.types.bool;
281 Forcibly import the ZFS root pool(s) during early boot.
283 This is enabled by default for backwards compatibility purposes, but it is highly
284 recommended to disable this option, as it bypasses some of the safeguards ZFS uses
285 to protect your ZFS pools.
287 If you set this option to `false` and NixOS subsequently fails to
288 boot because it cannot import the root pool, you should boot with the
289 `zfs_force=1` option as a kernel parameter (e.g. by manually
290 editing the kernel params in grub during boot). You should only need to do this
295 forceImportAll = lib.mkOption {
296 type = lib.types.bool;
299 Forcibly import all ZFS pool(s).
301 If you set this option to `false` and NixOS subsequently fails to
302 import your non-root ZFS pool(s), you should manually import each pool with
303 "zpool import -f \<pool-name\>", and then reboot. You should only need to do
308 requestEncryptionCredentials = lib.mkOption {
309 type = lib.types.either lib.types.bool (lib.types.listOf lib.types.str);
311 example = [ "tank" "data" ];
If true, encryption keys or passwords for all encrypted datasets are
requested on import. To decrypt only selected datasets, supply a list of
dataset names instead. For root pools the encryption key can be supplied
either via an interactive prompt (keylocation=prompt) or from a file
(keylocation=file://).
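For example, to request keys only for selected datasets (names are
illustrative):

    boot.zfs.requestEncryptionCredentials = [ "tank/secret" "tank/home" ];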
320 passwordTimeout = lib.mkOption {
321 type = lib.types.int;
Timeout in seconds to wait for password entry when decrypting datasets at boot.

Defaults to 0, which waits forever.
330 removeLinuxDRM = lib.mkOption {
331 type = lib.types.bool;
334 Patch the kernel to change symbols needed by ZFS from
335 EXPORT_SYMBOL_GPL to EXPORT_SYMBOL.
337 Currently has no effect, but may again in future if a kernel
338 update breaks ZFS due to symbols being newly changed to GPL.
343 services.zfs.autoSnapshot = {
344 enable = lib.mkOption {
346 type = lib.types.bool;
348 Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
349 Note that you must set the `com.sun:auto-snapshot`
350 property to `true` on all datasets which you wish
You can override whether a child dataset participates in auto-snapshotting
for a given interval by setting its flag, for example:
`zfs set com.sun:auto-snapshot:weekly=false DATASET`
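A minimal setup might look like this (the dataset name is illustrative):

    services.zfs.autoSnapshot.enable = true;

combined with, on the command line:

    zfs set com.sun:auto-snapshot=true tank/home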
359 flags = lib.mkOption {
361 example = "-k -p --utc";
362 type = lib.types.str;
364 Flags to pass to the zfs-auto-snapshot command.
366 Run `zfs-auto-snapshot` (without any arguments) to
369 If it's not too inconvenient for snapshots to have timestamps in UTC,
370 it is suggested that you append `--utc` to the list
371 of default options (see example).
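For example (matching the option's example value):

    services.zfs.autoSnapshot.flags = "-k -p --utc";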
Otherwise, snapshot names can cause name conflicts or apparent time
reversals due to daylight saving time, timezone, or other date/time changes.
378 frequent = lib.mkOption {
380 type = lib.types.int;
382 Number of frequent (15-minute) auto-snapshots that you wish to keep.
386 hourly = lib.mkOption {
388 type = lib.types.int;
390 Number of hourly auto-snapshots that you wish to keep.
394 daily = lib.mkOption {
396 type = lib.types.int;
398 Number of daily auto-snapshots that you wish to keep.
402 weekly = lib.mkOption {
404 type = lib.types.int;
406 Number of weekly auto-snapshots that you wish to keep.
410 monthly = lib.mkOption {
412 type = lib.types.int;
414 Number of monthly auto-snapshots that you wish to keep.
419 services.zfs.trim = {
420 enable = lib.mkOption {
421 description = "Whether to enable periodic TRIM on all ZFS pools.";
424 type = lib.types.bool;
427 interval = lib.mkOption {
429 type = lib.types.str;
432 How often we run trim. For most desktop and server systems
433 a sufficient trimming frequency is once a week.
435 The format is described in
436 {manpage}`systemd.time(7)`.
440 randomizedDelaySec = lib.mkOption {
442 type = lib.types.str;
445 Add a randomized delay before each ZFS trim.
446 The delay will be chosen between zero and this value.
447 This value must be a time span in the format specified by
448 {manpage}`systemd.time(7)`
453 services.zfs.autoScrub = {
454 enable = lib.mkEnableOption "periodic scrubbing of ZFS pools";
456 interval = lib.mkOption {
458 type = lib.types.str;
459 example = "quarterly";
461 Systemd calendar expression when to scrub ZFS pools. See
462 {manpage}`systemd.time(7)`.
466 randomizedDelaySec = lib.mkOption {
468 type = lib.types.str;
471 Add a randomized delay before each ZFS autoscrub.
472 The delay will be chosen between zero and this value.
473 This value must be a time span in the format specified by
474 {manpage}`systemd.time(7)`
478 pools = lib.mkOption {
480 type = lib.types.listOf lib.types.str;
481 example = [ "tank" ];
483 List of ZFS pools to periodically scrub. If empty, all pools
489 services.zfs.expandOnBoot = lib.mkOption {
490 type = lib.types.either (lib.types.enum [ "disabled" "all" ]) (lib.types.listOf lib.types.str);
491 default = "disabled";
492 example = [ "tank" "dozer" ];
494 After importing, expand each device in the specified pools.
496 Set the value to the plain string "all" to expand all pools on boot:
498 services.zfs.expandOnBoot = "all";
500 or set the value to a list of pools to expand the disks of specific pools:
502 services.zfs.expandOnBoot = [ "tank" "dozer" ];
507 enableMail = lib.mkOption {
508 type = lib.types.bool;
509 default = config.services.mail.sendmailSetuidWrapper != null;
510 defaultText = lib.literalExpression ''
511 config.services.mail.sendmailSetuidWrapper != null
514 Whether to enable ZED's ability to send emails.
518 settings = lib.mkOption {
519 type = let t = lib.types; in t.attrsOf (t.oneOf [ t.str t.int t.bool (t.listOf t.str) ]);
520 example = lib.literalExpression ''
522 ZED_DEBUG_LOG = "/tmp/zed.debug.log";
524 ZED_EMAIL_ADDR = [ "root" ];
525 ZED_EMAIL_PROG = "mail";
526 ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";
528 ZED_NOTIFY_INTERVAL_SECS = 3600;
529 ZED_NOTIFY_VERBOSE = false;
531 ZED_USE_ENCLOSURE_LEDS = true;
532 ZED_SCRUB_AFTER_RESILVER = false;
536 ZFS Event Daemon /etc/zfs/zed.d/zed.rc content
540 for details on ZED and the scripts in /etc/zfs/zed.d to find the possible variables
546 ###### implementation
548 config = lib.mkMerge [
549 (lib.mkIf cfgZfs.enabled {
552 assertion = cfgZfs.modulePackage.version == cfgZfs.package.version;
553 message = "The kernel module and the userspace tooling versions are not matching, this is an unsupported usecase.";
556 assertion = config.networking.hostId != null;
557 message = "ZFS requires networking.hostId to be set";
560 assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
561 message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
564 assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
565 message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
568 assertion = !(lib.elem "" allPools);
570 Automatic pool detection found an empty pool name, which can't be used.
571 Hint: for `fileSystems` entries with `fsType = zfs`, the `device` attribute
572 should be a zfs dataset name, like `device = "pool/data/set"`.
573 This error can be triggered by using an absolute path, such as `"/dev/disk/..."`.
579 kernelModules = [ "zfs" ];
580 # https://github.com/openzfs/zfs/issues/260
581 # https://github.com/openzfs/zfs/issues/12842
582 # https://github.com/NixOS/nixpkgs/issues/106093
583 kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];
585 extraModulePackages = [
590 boot.initrd = lib.mkIf inInitrd {
591 kernelModules = [ "zfs" ];
593 lib.mkIf (!config.boot.initrd.systemd.enable) ''
594 copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
595 copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
596 copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
597 copy_bin_and_libs ${cfgZfs.package}/lib/udev/vdev_id
598 copy_bin_and_libs ${cfgZfs.package}/lib/udev/zvol_id
600 extraUtilsCommandsTest =
601 lib.mkIf (!config.boot.initrd.systemd.enable) ''
602 $out/bin/zfs --help >/dev/null 2>&1
603 $out/bin/zpool --help >/dev/null 2>&1
605 postResumeCommands = lib.mkIf (!config.boot.initrd.systemd.enable) (lib.concatStringsSep "\n" ([''
606 ZFS_FORCE="${lib.optionalString cfgZfs.forceImportRoot "-f"}"
608 # See comments at importLib definition.
612 })] ++ (map (pool: ''
613 echo -n "importing root ZFS pool \"${pool}\"..."
# Retry the import until it succeeds, because the devices it needs may not have been discovered yet.
615 if ! poolImported "${pool}"; then
616 for trial in `seq 1 60`; do
617 poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
622 if [[ -n "$msg" ]]; then
625 poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
628 ${lib.optionalString config.boot.initrd.clevis.enable (lib.concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}") (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets))}
630 ${if lib.isBool cfgZfs.requestEncryptionCredentials
631 then lib.optionalString cfgZfs.requestEncryptionCredentials ''
634 else lib.concatMapStrings (fs: ''
635 zfs load-key -- ${lib.escapeShellArg fs}
636 '') (lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}
640 systemd = lib.mkIf config.boot.initrd.systemd.enable {
641 packages = [cfgZfs.package];
642 services = lib.listToAttrs (map (pool: createImportService {
644 systemd = config.boot.initrd.systemd.package;
645 force = cfgZfs.forceImportRoot;
648 targets.zfs-import.wantedBy = [ "zfs.target" ];
649 targets.zfs.wantedBy = [ "initrd.target" ];
651 zpool = "${cfgZfs.package}/sbin/zpool";
652 zfs = "${cfgZfs.package}/sbin/zfs";
653 awk = "${pkgs.gawk}/bin/awk";
656 "${cfgZfs.package}/lib/udev/vdev_id"
657 "${cfgZfs.package}/lib/udev/zvol_id"
660 services.udev.packages = [cfgZfs.package]; # to hook zvol naming, in stage 1
663 systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
664 exec ${cfgZfs.package}/bin/zpool sync
666 systemd.shutdownRamfs.storePaths = ["${cfgZfs.package}/bin/zpool"];
# TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To avoid breaking people's bootloaders, and because probably not everybody reads the release notes that thoroughly, also check inSystem.
669 boot.loader.grub = lib.mkIf (inInitrd || inSystem) {
671 zfsPackage = cfgZfs.package;
674 services.zfs.zed.settings = {
675 ZED_EMAIL_PROG = lib.mkIf cfgZED.enableMail (lib.mkDefault (
676 config.security.wrapperDir + "/" +
677 config.services.mail.sendmailSetuidWrapper.program
679 # subject in header for sendmail
680 ZED_EMAIL_OPTS = lib.mkIf cfgZED.enableMail (lib.mkDefault "@ADDRESS@");
682 PATH = lib.makeBinPath [
# ZFS already has its own scheduler. Without this, my (@Artturin's) computer froze for a second when I built something with nix.
695 services.udev.extraRules = ''
696 ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
699 environment.etc = lib.genAttrs
701 (file: "zfs/zed.d/${file}")
705 "resilver_finish-start-scrub.sh"
710 "resilver_finish-notify.sh"
711 "scrub_finish-notify.sh"
712 "statechange-notify.sh"
716 (file: { source = "${cfgZfs.package}/etc/${file}"; })
718 "zfs/zed.d/zed.rc".text = zedConf;
719 "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
722 system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
723 environment.systemPackages = [ cfgZfs.package ]
724 ++ lib.optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags
726 services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
727 systemd.packages = [ cfgZfs.package ];
729 systemd.services = let
730 createImportService' = pool: createImportService {
732 systemd = config.systemd.package;
733 force = cfgZfs.forceImportAll;
736 # This forces a sync of any ZFS pools prior to poweroff, even if they're set
738 createSyncService = pool:
739 lib.nameValuePair "zfs-sync-${pool}" {
740 description = "Sync ZFS pool \"${pool}\"";
741 wantedBy = [ "shutdown.target" ];
743 DefaultDependencies = false;
747 RemainAfterExit = true;
750 ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
754 createZfsService = serv:
755 lib.nameValuePair serv {
756 after = [ "systemd-modules-load.service" ];
757 wantedBy = [ "zfs.target" ];
760 in lib.listToAttrs (map createImportService' dataPools ++
761 map createSyncService allPools ++
762 map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);
764 systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];
766 systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
769 (lib.mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
770 systemd.services."zpool-expand@" = {
771 description = "Expand ZFS pools";
772 after = [ "zfs.target" ];
776 RemainAfterExit = true;
780 path = [ cfgZfs.package ];
785 echo "Expanding all devices for $pool."
787 ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
791 systemd.services."zpool-expand-pools" =
# Create a string, to be interpolated into a bash script, which enumerates
# all of the pools to expand. If `expandOnBoot` is set to "all", we expand
# every imported pool dynamically; otherwise we enumerate just the
# specifically provided list of pools.
798 poolListProvider = if cfgExpandOnBoot == "all"
799 then "$(zpool list -H -o name)"
800 else lib.escapeShellArgs cfgExpandOnBoot;
803 description = "Expand specified ZFS pools";
804 wantedBy = [ "default.target" ];
805 after = [ "zfs.target" ];
809 RemainAfterExit = true;
812 path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];
815 for pool in ${poolListProvider}; do
816 systemctl start --no-block "zpool-expand@$pool"
822 (lib.mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
823 systemd.services = let
824 descr = name: if name == "frequent" then "15 mins"
825 else if name == "hourly" then "hour"
826 else if name == "daily" then "day"
827 else if name == "weekly" then "week"
828 else if name == "monthly" then "month"
829 else throw "unknown snapshot name";
830 numSnapshots = name: builtins.getAttr name cfgSnapshots;
831 in builtins.listToAttrs (map (snapName:
833 name = "zfs-snapshot-${snapName}";
835 description = "ZFS auto-snapshotting every ${descr snapName}";
836 after = [ "zfs-import.target" ];
839 ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
841 restartIfChanged = false;
846 timer = name: if name == "frequent" then "*:0,15,30,45" else name;
847 in builtins.listToAttrs (map (snapName:
849 name = "zfs-snapshot-${snapName}";
851 wantedBy = [ "timers.target" ];
853 OnCalendar = timer snapName;
860 (lib.mkIf (cfgZfs.enabled && cfgScrub.enable) {
861 systemd.services.zfs-scrub = {
862 description = "ZFS pools scrubbing";
863 after = [ "zfs-import.target" ];
868 ${cfgZfs.package}/bin/zpool scrub -w ${
869 if cfgScrub.pools != [] then
870 (lib.concatStringsSep " " cfgScrub.pools)
872 "$(${cfgZfs.package}/bin/zpool list -H -o name)"
877 systemd.timers.zfs-scrub = {
878 wantedBy = [ "timers.target" ];
879 after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
881 OnCalendar = cfgScrub.interval;
883 RandomizedDelaySec = cfgScrub.randomizedDelaySec;
888 (lib.mkIf (cfgZfs.enabled && cfgTrim.enable) {
889 systemd.services.zpool-trim = {
890 description = "ZFS pools trim";
891 after = [ "zfs-import.target" ];
892 path = [ cfgZfs.package ];
893 startAt = cfgTrim.interval;
# By default we ignore errors returned by the trim command, in case:
# - HDDs are mixed with SSDs
# - an SSD in a pool is currently being trimmed
# - there are only HDDs, and the resulting failure would put the system in a degraded state
898 serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true' ";
901 systemd.timers.zpool-trim.timerConfig = {
903 RandomizedDelaySec = cfgTrim.randomizedDelaySec;