1 { config, lib, options, pkgs, utils, ... }:
10 cfgZfs = config.boot.zfs;
11 optZfs = options.boot.zfs;
12 cfgExpandOnBoot = config.services.zfs.expandOnBoot;
13 cfgSnapshots = config.services.zfs.autoSnapshot;
14 cfgSnapFlags = cfgSnapshots.flags;
15 cfgScrub = config.services.zfs.autoScrub;
16 cfgTrim = config.services.zfs.trim;
17 cfgZED = config.services.zfs.zed;
19 selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
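# Clevis-managed datasets that are (ancestors of) ZFS filesystems needed for
# boot; their keys are loaded via `clevis decrypt | zfs load-key` during import.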
20 clevisDatasets = attrNames (filterAttrs (device: _: any (e: e.fsType == "zfs" && (fsNeededForBoot e) && (e.device == device || hasPrefix "${device}/" e.device)) config.system.build.fileSystems) config.boot.initrd.clevis.devices);
22 inInitrd = config.boot.initrd.supportedFilesystems.zfs or false;
23 inSystem = config.boot.supportedFilesystems.zfs or false;
25 autosnapPkg = pkgs.zfstools.override {
29 zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";
31 datasetToPool = x: elemAt (splitString "/" x) 0;
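# e.g. datasetToPool "rpool/safe/root" evaluates to "rpool".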
33 fsToPool = fs: datasetToPool fs.device;
35 zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;
37 allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);
39 rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));
41 dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);
43 snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];
45 # When importing ZFS pools, there's one difficulty: These scripts may run
46 # before the backing devices (physical HDDs, etc.) of the pool have been
47 # scanned and initialized.
49 # An attempted import with all devices missing will just fail and can be
50 # retried, but an import where e.g. two out of three disks in a three-way
51 # mirror are missing will succeed. This is a problem: when the missing disks
52 # are later discovered, they won't be automatically set online, leaving the
53 # pool without redundancy (and far slower) until the system reboots.
55 # The solution is below. poolReady checks the status of an un-imported
56 # pool, to see if *every* device is available -- in which case the pool will be
57 # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
59 # The import scripts then loop over this, waiting until the pool is ready or a
60 # sufficient amount of time has passed that we can assume it won't be. In the
61 # latter case it makes one last attempt at importing, allowing the system to
62 # (eventually) boot even with a degraded pool.
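#
# Roughly, the generated shell (from importLib and createImportService below)
# boils down to a retry loop of this shape; the sleep interval is illustrative:
#
#   for trial in $(seq 1 60); do
#     poolReady "$pool" && poolImport "$pool" && break
#     sleep 1
#   done
#   poolImported "$pool" || poolImport "$pool"  # one last try, possibly degraded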
63 importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
64 for o in $(cat /proc/cmdline); do
66 zfs_force|zfs_force=1|zfs_force=y)
73 state="$("${zpoolCmd}" import -d "${cfgZfs.devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
74 if [[ "$state" = "ONLINE" ]]; then
77 echo "Pool $pool in state $state, waiting"
83 "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
87 "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
91 getPoolFilesystems = pool:
92 filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;
94 getPoolMounts = prefix: pool:
96 poolFSes = getPoolFilesystems pool;
98 # Remove the "/" suffix because even though most mountpoints
99 # won't have it, the "/" mountpoint will, and we can't have the
100 # trailing slash in "/sysroot/" in stage 1.
101 mountPoint = fs: escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));
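# e.g. in stage 1 (prefix = "/sysroot") the root filesystem ("/") should map
# to the "sysroot.mount" unit rather than one with a trailing separator.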
103 hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
105 map (x: "${mountPoint x}.mount") poolFSes
106 ++ lib.optional hasUsr "sysusr-usr.mount";
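# If requestEncryptionCredentials is a bool, report the key status of every
# dataset in the pool; if it is a list, only of the listed datasets that
# belong to this pool.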
108 getKeyLocations = pool: if isBool cfgZfs.requestEncryptionCredentials then {
109 hasKeys = cfgZfs.requestEncryptionCredentials;
110 command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
112 keys = filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
114 hasKeys = keys != [];
115 command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
118 createImportService = { pool, systemd, force, prefix ? "" }:
119 nameValuePair "zfs-import-${pool}" {
120 description = "Import ZFS pool \"${pool}\"";
121 # We wait for systemd-udev-settle to ensure devices are available,
122 # but don't *require* it, because mounts shouldn't be killed if it's stopped.
123 # In the future, hopefully someone will complete this:
124 # https://github.com/zfsonlinux/zfs/pull/4943
125 wants = [ "systemd-udev-settle.service" ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
127 "systemd-udev-settle.service"
128 "systemd-modules-load.service"
129 "systemd-ask-password-console.service"
130 ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
131 requiredBy = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
132 before = getPoolMounts prefix pool ++ [ "shutdown.target" "zfs-import.target" ];
133 conflicts = [ "shutdown.target" ];
135 DefaultDependencies = "no";
139 RemainAfterExit = true;
141 environment.ZFS_FORCE = optionalString force "-f";
143 keyLocations = getKeyLocations pool;
145 # See comments at importLib definition.
146 zpoolCmd = "${cfgZfs.package}/sbin/zpool";
147 awkCmd = "${pkgs.gawk}/bin/awk";
150 if ! poolImported "${pool}"; then
151 echo -n "importing ZFS pool \"${pool}\"..."
152 # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
153 for trial in `seq 1 60`; do
154 poolReady "${pool}" && poolImport "${pool}" && break
157 poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
159 if poolImported "${pool}"; then
160 ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true ") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}
163 ${optionalString keyLocations.hasKeys ''
164 ${keyLocations.command} | while IFS=$'\t' read ds kl ks; do
166 if [[ "$ks" != unavailable ]]; then
175 while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
176 ${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
178 || tries=$((tries - 1))
180 [[ $success = true ]]
183 ${cfgZfs.package}/sbin/zfs load-key "$ds"
186 } < /dev/null # Protect the 'while read ds kl ks' loop in case anything reads stdin
189 echo "Successfully imported ${pool}"
196 zedConf = generators.toKeyValue {
197 mkKeyValue = generators.mkKeyValueDefault {
199 if isInt v then toString v
200 else if isString v then "\"${v}\""
201 else if true == v then "1"
202 else if false == v then "0"
203 else if isList v then "\"" + (concatStringsSep " " v) + "\""
204 else err "this value is" (toString v);
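# With the example values shown for services.zfs.zed.settings below, this
# renders zed.rc lines like (illustrative):
#   ZED_EMAIL_ADDR="root"
#   ZED_NOTIFY_INTERVAL_SECS=3600
#   ZED_USE_ENCLOSURE_LEDS=1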
212 (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
213 (mkRemovedOptionModule [ "boot" "zfs" "enableUnstable" ] "Instead set `boot.zfs.package = pkgs.zfs_unstable;`")
221 type = types.package;
223 defaultText = literalExpression "pkgs.zfs";
224 description = "Configured ZFS userland tools package. Use `pkgs.zfs_unstable` if you want to track the latest staging ZFS branch.";
227 modulePackage = mkOption {
228 internal = true; # It is supposed to be selected automatically, but can be overridden by expert users.
229 default = selectModulePackage cfgZfs.package;
230 type = types.package;
231 description = "Configured ZFS kernel module package.";
237 default = inInitrd || inSystem;
238 defaultText = literalMD "`true` if ZFS filesystem support is enabled";
239 description = "True if ZFS filesystem support is enabled";
242 allowHibernation = mkOption {
246 Allow hibernation support. This may be an unsafe option depending on your
247 setup; make sure NOT to use swap on ZFS.
251 extraPools = mkOption {
252 type = types.listOf types.str;
254 example = [ "tank" "data" ];
256 Name or GUID of extra ZFS pools that you wish to import during boot.
258 Usually this is not necessary. Instead, you should set the mountpoint property
259 of ZFS filesystems to `legacy` and add the ZFS filesystems to
260 NixOS's {option}`fileSystems` option, which makes NixOS automatically
261 import the associated pool.
263 However, in some cases (e.g. if you have many filesystems) it may be preferable
264 to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
265 will not be managing those filesystems, you will need to specify the ZFS pool here
266 so that NixOS automatically imports it on every boot.
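For example, to have NixOS import a pool named `tank` that is otherwise
managed entirely with the ZFS tools (pool name illustrative):

  boot.zfs.extraPools = [ "tank" ];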
270 devNodes = mkOption {
272 default = "/dev/disk/by-id";
274 Name of directory from which to import ZFS devices.
276 This should be a path under /dev containing stable names for all devices needed, as
277 import may fail if device nodes are renamed concurrently with a device failing.
281 forceImportRoot = mkOption {
285 Forcibly import the ZFS root pool(s) during early boot.
287 This is enabled by default for backwards compatibility purposes, but it is highly
288 recommended to disable this option, as it bypasses some of the safeguards ZFS uses
289 to protect your ZFS pools.
291 If you set this option to `false` and NixOS subsequently fails to
292 boot because it cannot import the root pool, you should boot with the
293 `zfs_force=1` option as a kernel parameter (e.g. by manually
294 editing the kernel params in grub during boot). You should only need to do this once.
299 forceImportAll = mkOption {
303 Forcibly import all ZFS pool(s).
305 If you set this option to `false` and NixOS subsequently fails to
306 import your non-root ZFS pool(s), you should manually import each pool with
307 "zpool import -f \<pool-name\>", and then reboot. You should only need to do
312 requestEncryptionCredentials = mkOption {
313 type = types.either types.bool (types.listOf types.str);
315 example = [ "tank" "data" ];
317 If true, the encryption keys or passwords for all encrypted datasets
318 are requested on import. To only decrypt selected datasets, supply a list of dataset
319 names instead. For root pools the encryption key can be supplied via
320 an interactive prompt (keylocation=prompt) or from a file (keylocation=file://).
324 passwordTimeout = mkOption {
328 Timeout in seconds to wait for password entry for decrypt at boot.
330 Defaults to 0, which waits forever.
334 removeLinuxDRM = lib.mkOption {
338 Patch the kernel to change symbols needed by ZFS from
339 EXPORT_SYMBOL_GPL to EXPORT_SYMBOL.
341 Currently has no effect, but may again in future if a kernel
342 update breaks ZFS due to symbols being newly changed to GPL.
347 services.zfs.autoSnapshot = {
352 Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
353 Note that you must set the `com.sun:auto-snapshot`
354 property to `true` on all datasets which you wish to auto-snapshot.
357 You can override a child dataset to use, or not use auto-snapshotting
358 by setting its flag with the given interval:
359 `zfs set com.sun:auto-snapshot:weekly=false DATASET`
365 example = "-k -p --utc";
368 Flags to pass to the zfs-auto-snapshot command.
370 Run `zfs-auto-snapshot` (without any arguments) to see the available flags.
373 If it's not too inconvenient for snapshots to have timestamps in UTC,
374 it is suggested that you append `--utc` to the list
375 of default options (see example).
377 Otherwise, snapshot names can cause name conflicts or apparent time
378 reversals due to daylight savings, timezone or other date/time changes.
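For example, to use the flags from the example above (including `--utc`):

  services.zfs.autoSnapshot.flags = "-k -p --utc";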
382 frequent = mkOption {
386 Number of frequent (15-minute) auto-snapshots that you wish to keep.
394 Number of hourly auto-snapshots that you wish to keep.
402 Number of daily auto-snapshots that you wish to keep.
410 Number of weekly auto-snapshots that you wish to keep.
418 Number of monthly auto-snapshots that you wish to keep.
423 services.zfs.trim = {
425 description = "Whether to enable periodic TRIM on all ZFS pools.";
431 interval = mkOption {
436 How often we run trim. For most desktop and server systems
437 a sufficient trimming frequency is once a week.
439 The format is described in
440 {manpage}`systemd.time(7)`.
444 randomizedDelaySec = mkOption {
449 Add a randomized delay before each ZFS trim.
450 The delay will be chosen between zero and this value.
451 This value must be a time span in the format specified by
452 {manpage}`systemd.time(7)`
457 services.zfs.autoScrub = {
458 enable = mkEnableOption "periodic scrubbing of ZFS pools";
460 interval = mkOption {
463 example = "quarterly";
465 Systemd calendar expression for when to scrub ZFS pools. See
466 {manpage}`systemd.time(7)`.
470 randomizedDelaySec = mkOption {
475 Add a randomized delay before each ZFS autoscrub.
476 The delay will be chosen between zero and this value.
477 This value must be a time span in the format specified by
478 {manpage}`systemd.time(7)`
484 type = types.listOf types.str;
485 example = [ "tank" ];
487 List of ZFS pools to periodically scrub. If empty, all pools will be scrubbed.
493 services.zfs.expandOnBoot = mkOption {
494 type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
495 default = "disabled";
496 example = [ "tank" "dozer" ];
498 After importing, expand each device in the specified pools.
500 Set the value to the plain string "all" to expand all pools on boot:
502 services.zfs.expandOnBoot = "all";
504 or set the value to a list of pools to expand the disks of specific pools:
506 services.zfs.expandOnBoot = [ "tank" "dozer" ];
511 enableMail = mkOption {
513 default = config.services.mail.sendmailSetuidWrapper != null;
514 defaultText = literalExpression ''
515 config.services.mail.sendmailSetuidWrapper != null
518 Whether to enable ZED's ability to send emails.
522 settings = mkOption {
523 type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
524 example = literalExpression ''
526 ZED_DEBUG_LOG = "/tmp/zed.debug.log";
528 ZED_EMAIL_ADDR = [ "root" ];
529 ZED_EMAIL_PROG = "mail";
530 ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";
532 ZED_NOTIFY_INTERVAL_SECS = 3600;
533 ZED_NOTIFY_VERBOSE = false;
535 ZED_USE_ENCLOSURE_LEDS = true;
536 ZED_SCRUB_AFTER_RESILVER = false;
540 ZFS Event Daemon /etc/zfs/zed.d/zed.rc content.
544 See {manpage}`zed(8)` for details on ZED and the scripts in /etc/zfs/zed.d to find the possible variables.
550 ###### implementation
553 (mkIf cfgZfs.enabled {
556 assertion = cfgZfs.modulePackage.version == cfgZfs.package.version;
557 message = "The kernel module and the userspace tooling versions are not matching, this is an unsupported usecase.";
560 assertion = config.networking.hostId != null;
561 message = "ZFS requires networking.hostId to be set";
564 assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
565 message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
568 assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
569 message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
572 assertion = !(elem "" allPools);
574 Automatic pool detection found an empty pool name, which can't be used.
575 Hint: for `fileSystems` entries with `fsType = zfs`, the `device` attribute
576 should be a zfs dataset name, like `device = "pool/data/set"`.
577 This error can be triggered by using an absolute path, such as `"/dev/disk/..."`.
583 kernelModules = [ "zfs" ];
584 # https://github.com/openzfs/zfs/issues/260
585 # https://github.com/openzfs/zfs/issues/12842
586 # https://github.com/NixOS/nixpkgs/issues/106093
587 kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];
589 extraModulePackages = [
594 boot.initrd = mkIf inInitrd {
595 # spl has been removed in ≥ 2.2.0.
596 kernelModules = [ "zfs" ] ++ lib.optional (lib.versionOlder "2.2.0" version) "spl";
598 mkIf (!config.boot.initrd.systemd.enable) ''
599 copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
600 copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
601 copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
602 copy_bin_and_libs ${cfgZfs.package}/lib/udev/vdev_id
603 copy_bin_and_libs ${cfgZfs.package}/lib/udev/zvol_id
605 extraUtilsCommandsTest =
606 mkIf (!config.boot.initrd.systemd.enable) ''
607 $out/bin/zfs --help >/dev/null 2>&1
608 $out/bin/zpool --help >/dev/null 2>&1
610 postResumeCommands = mkIf (!config.boot.initrd.systemd.enable) (concatStringsSep "\n" ([''
611 ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
613 # See comments at importLib definition.
617 })] ++ (map (pool: ''
618 echo -n "importing root ZFS pool \"${pool}\"..."
619 # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
620 if ! poolImported "${pool}"; then
621 for trial in `seq 1 60`; do
622 poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
627 if [[ -n "$msg" ]]; then
630 poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
633 ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}
635 ${if isBool cfgZfs.requestEncryptionCredentials
636 then optionalString cfgZfs.requestEncryptionCredentials ''
639 else concatMapStrings (fs: ''
640 zfs load-key -- ${escapeShellArg fs}
641 '') (filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}
645 systemd = mkIf config.boot.initrd.systemd.enable {
646 packages = [cfgZfs.package];
647 services = listToAttrs (map (pool: createImportService {
649 systemd = config.boot.initrd.systemd.package;
650 force = cfgZfs.forceImportRoot;
653 targets.zfs-import.wantedBy = [ "zfs.target" ];
654 targets.zfs.wantedBy = [ "initrd.target" ];
656 zpool = "${cfgZfs.package}/sbin/zpool";
657 zfs = "${cfgZfs.package}/sbin/zfs";
658 awk = "${pkgs.gawk}/bin/awk";
661 "${cfgZfs.package}/lib/udev/vdev_id"
662 "${cfgZfs.package}/lib/udev/zvol_id"
665 services.udev.packages = [cfgZfs.package]; # to hook zvol naming, in stage 1
668 systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
669 exec ${cfgZfs.package}/bin/zpool sync
671 systemd.shutdownRamfs.storePaths = ["${cfgZfs.package}/bin/zpool"];
673 # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To avoid breaking people's bootloaders, and since probably not everybody reads release notes that thoroughly, we also check inSystem.
674 boot.loader.grub = mkIf (inInitrd || inSystem) {
676 zfsPackage = cfgZfs.package;
679 services.zfs.zed.settings = {
680 ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault (
681 config.security.wrapperDir + "/" +
682 config.services.mail.sendmailSetuidWrapper.program
684 # subject in header for sendmail
685 ZED_EMAIL_OPTS = mkIf cfgZED.enableMail (mkDefault "@ADDRESS@");
687 PATH = lib.makeBinPath [
699 # ZFS already has its own scheduler. Without this, my (@Artturin) computer froze for a second whenever I ran a nix build.
700 services.udev.extraRules = ''
701 ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
704 environment.etc = genAttrs
706 (file: "zfs/zed.d/${file}")
710 "resilver_finish-start-scrub.sh"
715 "resilver_finish-notify.sh"
716 "scrub_finish-notify.sh"
717 "statechange-notify.sh"
721 (file: { source = "${cfgZfs.package}/etc/${file}"; })
723 "zfs/zed.d/zed.rc".text = zedConf;
724 "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
727 system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
728 environment.systemPackages = [ cfgZfs.package ]
729 ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags
731 services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
732 systemd.packages = [ cfgZfs.package ];
734 systemd.services = let
735 createImportService' = pool: createImportService {
737 systemd = config.systemd.package;
738 force = cfgZfs.forceImportAll;
741 # This forces a sync of any ZFS pools prior to poweroff, even if they're set to sync=disabled.
743 createSyncService = pool:
744 nameValuePair "zfs-sync-${pool}" {
745 description = "Sync ZFS pool \"${pool}\"";
746 wantedBy = [ "shutdown.target" ];
748 DefaultDependencies = false;
752 RemainAfterExit = true;
755 ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
759 createZfsService = serv:
761 after = [ "systemd-modules-load.service" ];
762 wantedBy = [ "zfs.target" ];
765 in listToAttrs (map createImportService' dataPools ++
766 map createSyncService allPools ++
767 map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);
769 systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];
771 systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
774 (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
775 systemd.services."zpool-expand@" = {
776 description = "Expand ZFS pools";
777 after = [ "zfs.target" ];
781 RemainAfterExit = true;
785 path = [ cfgZfs.package ];
790 echo "Expanding all devices for $pool."
792 ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
796 systemd.services."zpool-expand-pools" =
798 # Create a string, to be interpolated in a bash script
799 # which enumerates all of the pools to expand.
800 # If expandOnBoot is set to "all", we want to dynamically
801 # expand every pool. Otherwise we want to enumerate
802 # just the specifically provided list of pools.
803 poolListProvider = if cfgExpandOnBoot == "all"
804 then "$(zpool list -H -o name)"
805 else lib.escapeShellArgs cfgExpandOnBoot;
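# e.g. services.zfs.expandOnBoot = [ "tank" "dozer" ] yields the literal
# string "'tank' 'dozer'" here.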
808 description = "Expand specified ZFS pools";
809 wantedBy = [ "default.target" ];
810 after = [ "zfs.target" ];
814 RemainAfterExit = true;
817 path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];
820 for pool in ${poolListProvider}; do
821 systemctl start --no-block "zpool-expand@$pool"
827 (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
828 systemd.services = let
829 descr = name: if name == "frequent" then "15 mins"
830 else if name == "hourly" then "hour"
831 else if name == "daily" then "day"
832 else if name == "weekly" then "week"
833 else if name == "monthly" then "month"
834 else throw "unknown snapshot name";
835 numSnapshots = name: builtins.getAttr name cfgSnapshots;
836 in builtins.listToAttrs (map (snapName:
838 name = "zfs-snapshot-${snapName}";
840 description = "ZFS auto-snapshotting every ${descr snapName}";
841 after = [ "zfs-import.target" ];
844 ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
846 restartIfChanged = false;
851 timer = name: if name == "frequent" then "*:0,15,30,45" else name;
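# "frequent" fires every quarter hour; the other snapshot names ("hourly",
# "daily", "weekly", "monthly") are already valid systemd calendar shorthands.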
852 in builtins.listToAttrs (map (snapName:
854 name = "zfs-snapshot-${snapName}";
856 wantedBy = [ "timers.target" ];
858 OnCalendar = timer snapName;
865 (mkIf (cfgZfs.enabled && cfgScrub.enable) {
866 systemd.services.zfs-scrub = {
867 description = "ZFS pools scrubbing";
868 after = [ "zfs-import.target" ];
873 ${cfgZfs.package}/bin/zpool scrub -w ${
874 if cfgScrub.pools != [] then
875 (concatStringsSep " " cfgScrub.pools)
877 "$(${cfgZfs.package}/bin/zpool list -H -o name)"
882 systemd.timers.zfs-scrub = {
883 wantedBy = [ "timers.target" ];
884 after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
886 OnCalendar = cfgScrub.interval;
888 RandomizedDelaySec = cfgScrub.randomizedDelaySec;
893 (mkIf (cfgZfs.enabled && cfgTrim.enable) {
894 systemd.services.zpool-trim = {
895 description = "ZFS pools trim";
896 after = [ "zfs-import.target" ];
897 path = [ cfgZfs.package ];
898 startAt = cfgTrim.interval;
899 # By default we ignore errors returned by the trim command, in case:
900 # - HDDs are mixed with SSDs
901 # - There is an SSD in a pool that is currently being trimmed.
902 # - There are only HDDs and we would put the system in a degraded state
903 serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool; done || true' ";
906 systemd.timers.zpool-trim.timerConfig = {
908 RandomizedDelaySec = cfgTrim.randomizedDelaySec;