# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
#
# $quiet, $root, $rpool, $bootfs come from the cmdline:
# shellcheck disable=SC2154
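
# For example (dataset name illustrative only), the bootloader might pass
# a kernel command line such as:
#   boot=zfs root=ZFS=rpool/ROOT/debian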

# Source the common functions.
. /etc/zfs/zfs-functions

# Start an interactive shell.
# Use debian's panic() if defined, because it allows preventing shell access
# by setting panic on the cmdline (e.g. panic=0 or panic=15).
# See "4.5 Disable root prompt on the initramfs" of the Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell() {
	if command -v panic > /dev/null 2>&1; then
		panic
	else
		/bin/sh
	fi
}

# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-top"
			run_scripts /scripts/local-top
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi

		if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-premount"
			run_scripts /scripts/local-premount
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	fi
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
	if [ -x /bin/plymouth ] && /bin/plymouth --ping
	then
		/bin/plymouth hide-splash >/dev/null 2>&1
	fi
}

# Get a ZFS filesystem property value.
get_fs_value()
{
	fs="$1"
	value=$2

	"${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}
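
# For example (dataset name illustrative):
#   get_fs_value rpool/ROOT/debian mountpoint
# prints that dataset's mountpoint, e.g. '/'.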

# Find the 'bootfs' property on pool $1.
# If the property does not contain '/', then ignore this
# pool by exporting it again.
find_rootfs()
{
	pool="$1"

	# If 'POOL_IMPORTED' isn't set, no pool is imported and therefore
	# we won't be able to find a root fs.
	[ -z "${POOL_IMPORTED}" ] && return 1

	# If it's already specified, just keep it mounted and exit.
	# The user (kernel command line) must be correct.
	if [ -n "${ZFS_BOOTFS}" ] && [ "${ZFS_BOOTFS}" != "zfs:AUTO" ]; then
		return 0
	fi

	# Not set, try to find it in the 'bootfs' property of the pool.
	# NOTE: zpool does not support 'get -H -ovalue bootfs'...
	ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")

	# Make sure it's not '-' and that its mountpoint is '/'.
	if [ "${ZFS_BOOTFS}" != "-" ] && \
		get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
	then
		# Keep it mounted.
		POOL_IMPORTED=1
		return 0
	fi

	# Not a boot fs here; export the pool and try again later.
	"${ZPOOL}" export "$pool"
	POOL_IMPORTED=
	ZFS_BOOTFS=
	return 1
}

# Support function to get a list of all pools, separated with ';'.
find_pools()
{
	pools=$("$@" 2> /dev/null | \
		sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \
		tr '\n' ';')

	echo "${pools%%;}" # Return without the last ';'.
}
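
# For example, 'zpool import' output containing lines such as
#    pool: rpool
#    pool: tank
# would (illustratively) be reduced by find_pools to "rpool;tank".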

# Get a list of all available pools.
get_pools()
{
	if [ -n "${ZFS_POOL_IMPORT}" ]; then
		echo "$ZFS_POOL_IMPORT"
		return 0
	fi

	# Get the base list of available pools.
	available_pools=$(find_pools "$ZPOOL" import)

	# Just in case - it has been seen that a pool isn't visible/found
	# with a simple "zpool import", but only when using the "-d"
	# option or setting ZPOOL_IMPORT_PATH.
	if [ -d "/dev/disk/by-id" ]
	then
		npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
		if [ -n "$npools" ]
		then
			# Because we have found extra pool(s) here, which weren't
			# found 'normally', we need to force USE_DISK_BY_ID to
			# make sure we're able to actually import it/them later.
			USE_DISK_BY_ID='yes'

			if [ -n "$available_pools" ]
			then
				# Filter out duplicates (pools found with the simple
				# "zpool import" but which are also found with the
				# "zpool import -d ...").
				npools=$(echo "$npools" | sed "s,$available_pools,,")

				# Add the list to the existing list of
				# available pools.
				available_pools="$available_pools;$npools"
			else
				available_pools="$npools"
			fi
		fi
	fi

	# Filter out any exceptions...
	if [ -n "$ZFS_POOL_EXCEPTIONS" ]
	then
		found=""
		apools=""
		OLD_IFS="$IFS" ; IFS=";"

		for pool in $available_pools
		do
			for exception in $ZFS_POOL_EXCEPTIONS
			do
				[ "$pool" = "$exception" ] && continue 2
				found="$pool"
			done

			if [ -n "$found" ]
			then
				if [ -n "$apools" ]
				then
					apools="$apools;$pool"
				else
					apools="$pool"
				fi
			fi
		done

		IFS="$OLD_IFS"
		available_pools="$apools"
	fi

	# Return the list of available pools.
	echo "$available_pools"
}

# Import the given pool $1.
import_pool()
{
	pool="$1"

	# Verify that the pool isn't already imported.
	# Make as sure as we can to not require '-f' to import.
	"${ZPOOL}" get -H -o value name,guid 2>/dev/null | grep -Fxq "$pool" && return 0

	# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
	# to something we can use later with the real import(s). We want to
	# make sure we find all by-* dirs, BUT by-vdev should be first (if it
	# exists).
	if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ]
	then
		dirs="$(for dir in /dev/disk/by-*
		do
			# Ignore by-vdev here - we want it first!
			echo "$dir" | grep -q /by-vdev && continue
			[ ! -d "$dir" ] && continue

			printf "%s" "$dir:"
		done | sed 's,:$,,g')"

		if [ -d "/dev/disk/by-vdev" ]
		then
			# Add by-vdev at the beginning.
			ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
		fi

		# ... and /dev at the very end, just for good measure.
		ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
	fi
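
	# Illustratively, ZPOOL_IMPORT_PATH might now look something like
	#   /dev/disk/by-vdev:/dev/disk/by-id:/dev/disk/by-label:...:/dev
	# depending on which /dev/disk/by-* directories exist on this system.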

	# Needs to be exported for "zpool" to catch it.
	[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH

	[ "$quiet" != "y" ] && zfs_log_begin_msg \
		"Importing pool '${pool}' using defaults"

	ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
	ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		if [ -f "${ZPOOL_CACHE}" ]
		then
			[ "$quiet" != "y" ] && zfs_log_begin_msg \
				"Importing pool '${pool}' using cachefile."

			ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
			ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
			ZFS_ERROR="$?"
		fi

		if [ "${ZFS_ERROR}" != 0 ]
		then
			[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

			disable_plymouth
			echo ""
			echo "Command: ${ZFS_CMD} '$pool'"
			echo "Message: $ZFS_STDERR"
			echo "Error: $ZFS_ERROR"
			echo ""
			echo "Failed to import pool '$pool'."
			echo "Manually import the pool and exit."
			shell
		fi
	fi

	[ "$quiet" != "y" ] && zfs_log_end_msg

	POOL_IMPORTED=1
	return 0
}

# Load ZFS modules.
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
	ZFS_INITRD_PRE_MOUNTROOT_SLEEP=${ROOTDELAY:-0}

	if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ]; then
		[ "$quiet" != "y" ] && zfs_log_begin_msg "Delaying for up to '${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}' seconds."
	fi

	START=$(/bin/date -u +%s)
	END=$((START+ZFS_INITRD_PRE_MOUNTROOT_SLEEP))
	while true; do

		# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
		if command -v wait_for_udev > /dev/null 2>&1 ; then
			wait_for_udev 10
		elif command -v wait_for_dev > /dev/null 2>&1 ; then
			wait_for_dev
		fi

		#
		# zpool import refuses to import without a valid
		# /proc/self/mounts
		#
		[ ! -f /proc/self/mounts ] && mount proc /proc

		# Load the module.
		if load_module "zfs"; then
			ret=0
			break
		else
			ret=1
		fi

		[ "$(/bin/date -u +%s)" -gt "$END" ] && break
		sleep 1

	done
	if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ]; then
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	[ "$ret" -ne 0 ] && return 1

	if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}
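
# For example, setting ZFS_INITRD_POST_MODPROBE_SLEEP=5 in /etc/default/zfs
# (sourced by mountroot() below) pauses for five seconds after the zfs module
# is loaded, which can help on systems whose devices are slow to settle.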

# Mount a given filesystem.
mount_fs()
{
	fs="$1"

	# Check that the filesystem exists.
	"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1

	# Skip filesystems with canmount=off.  The root fs should not have
	# canmount=off, but ignore it for backwards compatibility just in case.
	if [ "$fs" != "${ZFS_BOOTFS}" ]
	then
		canmount=$(get_fs_value "$fs" canmount)
		[ "$canmount" = "off" ] && return 0
	fi

	# Need the _original_ dataset's mountpoint!
	mountpoint=$(get_fs_value "$fs" mountpoint)
	ZFS_CMD="mount -o zfsutil -t zfs"
	if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then
		# Can't use the mountpoint property. Might be one of our
		# clones. Check the 'org.zol:mountpoint' property set in
		# clone_snap() if that's usable.
		mountpoint1=$(get_fs_value "$fs" org.zol:mountpoint)
		if [ "$mountpoint1" = "legacy" ] ||
		   [ "$mountpoint1" = "none" ] ||
		   [ "$mountpoint1" = "-" ]
		then
			if [ "$fs" != "${ZFS_BOOTFS}" ]; then
				# We don't have a proper mountpoint and this
				# isn't the root fs.
				return 0
			fi
			# Don't use 'mount -o zfsutil' for a legacy mountpoint.
			if [ "$mountpoint" = "legacy" ]; then
				ZFS_CMD="mount -t zfs"
			fi
			# Last hail-mary: Hope 'rootmnt' is set!
			mountpoint=""
		else
			mountpoint="$mountpoint1"
		fi
	fi

	# Possibly decrypt a filesystem using native encryption.
	decrypt_fs "$fs"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
	[ -n "${ZFS_DEBUG}" ] && \
	    zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"

	ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
	ZFS_ERROR=$?
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
		echo "Manually mount the filesystem and exit."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Unlock a ZFS native-encrypted filesystem.
decrypt_fs()
{
	fs="$1"

	# If pool encryption is active and the zfs command understands '-o encryption'
	if [ "$(zpool list -H -o feature@encryption "${fs%%/*}")" = 'active' ]; then

		# Determine the dataset that holds the key for the root dataset.
		ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
		KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"

		echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name

		# If the root dataset is encrypted...
		if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
			KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
			# Continue only if the key needs to be loaded.
			[ "$KEYSTATUS" = "unavailable" ] || return 0

			# Try extensions first.
			for f in "/etc/zfs/initramfs-tools-load-key" "/etc/zfs/initramfs-tools-load-key.d/"*; do
				[ -r "$f" ] || continue
				(. "$f") && {
					# Successful return and actually-loaded key: we're done.
					KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
					[ "$KEYSTATUS" = "unavailable" ] || return 0
				}
			done

			# Do not prompt if the key is stored noninteractively.
			if ! [ "${KEYLOCATION}" = "prompt" ]; then
				$ZFS load-key "${ENCRYPTIONROOT}"

			# Prompt with plymouth, if active.
			elif /bin/plymouth --ping 2>/dev/null; then
				echo "plymouth" > /run/zfs_console_askpwd_cmd
				for _ in 1 2 3; do
					plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
						$ZFS load-key "${ENCRYPTIONROOT}" && break
				done

			# Prompt with systemd, if active.
			elif [ -e /run/systemd/system ]; then
				echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
				for _ in 1 2 3; do
					systemd-ask-password --no-tty "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
						$ZFS load-key "${ENCRYPTIONROOT}" && break
				done

			# Prompt on the console, otherwise.
			else
				# Temporarily setting "printk" to "7" allows the prompt to
				# appear even when the "quiet" kernel option has been used.
				echo "load-key" > /run/zfs_console_askpwd_cmd
				read -r storeprintk _ < /proc/sys/kernel/printk
				echo 7 > /proc/sys/kernel/printk
				$ZFS load-key "${ENCRYPTIONROOT}"
				echo "$storeprintk" > /proc/sys/kernel/printk
			fi
		fi
	fi

	return 0
}
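
# A key-loading extension is any readable file under
# /etc/zfs/initramfs-tools-load-key.d/ that, when sourced above, loads the key
# for ${ENCRYPTIONROOT}. A minimal, purely hypothetical hook might contain:
#
#   # /etc/zfs/initramfs-tools-load-key.d/usb-keyfile  (illustrative only)
#   ${ZFS} load-key -L file:///media/usb/root.key "${ENCRYPTIONROOT}"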

# Destroy a given filesystem.
destroy_fs()
{
	fs="$1"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Destroying '$fs'"

	ZFS_CMD="${ZFS} destroy $fs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
		echo "Hint: Try:  zfs destroy -Rfn $fs"
		echo "If this dry run looks good, then remove the 'n' from '-Rfn' and try again."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Clone snapshot $1 to destination filesystem $2.
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
	snap="$1"
	destfs="$2"
	mountpoint="$3"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"

	# Clone the snapshot into a dataset we can boot from:
	# + We don't want this filesystem to be automatically mounted; we
	#   want control over this here and nowhere else.
	# + We don't need any mountpoint set for the same reason.
	# We use the 'org.zol:mountpoint' property to remember the mountpoint.
	ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
	ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
	ZFS_CMD="${ZFS_CMD} $snap $destfs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to clone snapshot."
		echo "Make sure that any problems are corrected and then make sure"
		echo "that the dataset '$destfs' exists and is bootable."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Roll back to a given snapshot.
rollback_snap()
{
	snap="$1"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"

	ZFS_CMD="${ZFS} rollback -Rf $snap"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to rollback snapshot."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Get a list of snapshots and present them as a numbered list
# for the user to choose from.
ask_user_snap()
{
	fs="$1"

	# We need to temporarily disable debugging. Set 'debug' so we
	# remember to enable it again.
	if [ -n "${ZFS_DEBUG}" ]; then
		unset ZFS_DEBUG
		set +x
		debug=1
	fi

	# Because we need the resulting snapshot, which is sent on
	# stdout to the caller, we use stderr for our questions.
	echo "What snapshot do you want to boot from?" > /dev/stderr
	# shellcheck disable=SC2046
	IFS="
" set -- $("${ZFS}" list -H -oname -tsnapshot -r "${fs}")

	i=1
	for snap in "$@"; do
		echo "  $i: $snap"
		i=$((i + 1))
	done > /dev/stderr

	# expr instead of test here because [ a -lt 0 ] errors out,
	# but expr falls back to lexicographical comparison, which works out right.
	snapnr=0
	while expr "$snapnr" "<" 1 > /dev/null ||
	    expr "$snapnr" ">" "$#" > /dev/null
	do
		printf "%s" "Snap nr [1-$#]? " > /dev/stderr
		read -r snapnr
	done

	# Re-enable debugging.
	if [ -n "${debug}" ]; then
		ZFS_DEBUG=1
		set -x
	fi

	eval echo '$'"$snapnr"
}

# Set up booting from a snapshot, given a <filesystem>@<snapshot> name in $1.
setup_snapshot_booting()
{
	snap="$1"
	retval=0

	# Make sure that the snapshot specified actually exists.
	if [ -z "$(get_fs_value "${snap}" type)" ]
	then
		# Snapshot does not exist (...@<null> ?).
		# Ask the user for a snapshot to use.
		snap="$(ask_user_snap "${snap%%@*}")"
	fi

	# Separate the full snapshot ('$snap') into its filesystem and
	# snapshot names. Would have been nice with a split() function..
	rootfs="${snap%%@*}"
	snapname="${snap##*@}"
	ZFS_BOOTFS="${rootfs}_${snapname}"

	if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
	then
		# If the destination dataset for the clone
		# already exists, destroy it. Recursively.
		if [ -n "$(get_fs_value "${rootfs}_${snapname}" type)" ]
		then
			filesystems=$("${ZFS}" list -oname -tfilesystem -H \
			    -r -Sname "${ZFS_BOOTFS}")
			for fs in $filesystems; do
				destroy_fs "${fs}"
			done
		fi
	fi

	# Get all snapshots, recursively (might need to clone /usr, /var etc
	# as well).
	for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
	    grep "${snapname}")
	do
		if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
		then
			# Roll back to the snapshot.
			rollback_snap "$s" || retval=$((retval + 1))
			ZFS_BOOTFS="${rootfs}"
		else
			# Setup a destination filesystem name.
			# Ex: Called with 'rpool/ROOT/debian@snap2'
			#	rpool/ROOT/debian@snap2		=> rpool/ROOT/debian_snap2
			#	rpool/ROOT/debian/boot@snap2	=> rpool/ROOT/debian_snap2/boot
			#	rpool/ROOT/debian/usr@snap2	=> rpool/ROOT/debian_snap2/usr
			#	rpool/ROOT/debian/var@snap2	=> rpool/ROOT/debian_snap2/var
			subfs="${s##"$rootfs"}"
			subfs="${subfs%%@"$snapname"}"

			destfs="${rootfs}_${snapname}" # base fs.
			[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.

			# Get the mountpoint of the filesystem, to be used
			# with clone_snap(). If legacy or none, then use
			# the sub fs value.
			mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
			if [ "$mountpoint" = "legacy" ] || \
			   [ "$mountpoint" = "none" ]
			then
				if [ -n "${subfs}" ]; then
					mountpoint="${subfs}"
				else
					mountpoint="/"
				fi
			fi

			# Clone the snapshot into its own
			# filesystem.
			clone_snap "$s" "${destfs}" "${mountpoint}" || \
			    retval=$((retval + 1))
		fi
	done

	# If we haven't returned yet, we have a problem...
	return "${retval}"
}
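
# For example (illustrative names), booting with 'root=ZFS=rpool/ROOT/debian@snap2'
# clones the snapshot (and the matching snapshots of any child datasets) into
# 'rpool/ROOT/debian_snap2' and boots from the clone; adding 'rollback=1' to the
# kernel command line rolls the live datasets back to the snapshot instead.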

# ================================================================

# This is the main function.
mountroot()
{
	# ----------------------------------------------------------------
	# I N I T I A L   S E T U P

	# ------------
	# Run the pre-mount scripts from /scripts/local-top.
	pre_mountroot

	# ------------
	# Source the default setup variables.
	[ -r '/etc/default/zfs' ] && . /etc/default/zfs

	# ------------
	# Support the debug option.
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZFS_DEBUG=1
		mkdir /var/log
		#exec 2> /var/log/boot.debug
		set -x
	fi

	# ------------
	# Load the ZFS module etc.
	if ! load_module_initrd; then
		disable_plymouth
		echo ""
		echo "Failed to load ZFS modules."
		echo "Manually load the modules and exit."
		shell
	fi

	# ------------
	# Look for the cache file (if any).
	[ -f "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE
	[ -s "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE

	# ------------
	# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
	#                'root' is for Redhat/Fedora (etc),
	#                'REAL_ROOT' is for Gentoo
	if [ -z "$ROOT" ]
	then
		[ -n "$root" ] && ROOT=${root}

		[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
	fi

	# ------------
	# Where to mount the root fs in the initrd - set outside this script.
	# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
	#                'NEWROOT' is for RedHat/Fedora (etc),
	#                'NEW_ROOT' is for Gentoo
	if [ -z "$rootmnt" ]
	then
		[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}

		[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
	fi

	# ------------
	# No longer set in the defaults file, but it could have been set in
	# get_pools() in some circumstances. If it's something, but not 'yes',
	# it's no good to us.
	[ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \
	    unset USE_DISK_BY_ID

	# ----------------------------------------------------------------
	# P A R S E   C O M M A N D   L I N E   O P T I O N S

	# This part is the really ugly part - there are so many options and
	# permutations 'out there', and if we are to make this the 'primary'
	# source for ZFS initrd scripting, we need to/should support them all.
	#
	# Supports the following kernel command line argument combinations
	# (in this order - first match wins):
	#
	#	rpool=<pool>			(tries to find bootfs automatically)
	#	bootfs=<pool>/<dataset>		(uses this for rpool - first part)
	#	rpool=<pool> bootfs=<pool>/<dataset>
	#	-B zfs-bootfs=<pool>/<fs>	(uses this for rpool - first part)
	#	rpool=rpool			(default if none of the above is used)
	#	root=<pool>/<dataset>		(uses this for rpool - first part)
	#	root=ZFS=<pool>/<dataset>	(uses this for rpool - first part, without 'ZFS=')
	#	root=zfs:AUTO			(tries to detect both pool and rootfs)
	#	root=zfs:<pool>/<dataset>	(uses this for rpool - first part, without 'zfs:')
	#
	# Option <dataset> could also be <snapshot>
	# Option <pool> could also be <guid>
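
	# For example (values illustrative), each of the following selects the
	# same root dataset:
	#	root=ZFS=rpool/ROOT/debian
	#	root=zfs:rpool/ROOT/debian
	#	rpool=rpool bootfs=rpool/ROOT/debian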

	# ------------
	# Support the force option.
	# Setting one of zfs_force, zfs.force or zfsforce to
	# 'yes', 'on' or '1' will make sure we force-import the pool.
	# This should (almost) never be needed, but it's here for
	# completeness.
	ZPOOL_FORCE=""
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZPOOL_FORCE="-f"
	fi

	# ------------
	# Look for the 'rpool' and 'bootfs' parameters.
	[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
	[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"

	# ------------
	# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
	# 'ROOT'.
	[ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"

	# ------------
	# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
	# NOTE: Only use the pool name and dataset. The rest is not
	#       supported by OpenZFS (whatever it's for).
	if [ -z "$ZFS_RPOOL" ]
	then
		# The ${zfs-bootfs} variable is set on the kernel command
		# line, usually by GRUB, but it cannot be referenced here
		# directly because Bourne variable names cannot contain a
		# hyphen.
		#
		# Reassign the variable by dumping the environment and
		# stripping the zfs-bootfs= prefix.  Let the shell handle
		# quoting through the eval command:
		# shellcheck disable=SC2046
		eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
	fi
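
	# Illustratively, a command-line entry such as 'zfs-bootfs=rpool/ROOT/debian'
	# would leave ZFS_RPOOL='rpool/ROOT/debian' after the eval above.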

	# ------------
	# No root fs or pool specified - do auto detect.
	if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ]
	then
		# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO',
		# which will be caught later.
		ROOT='zfs:AUTO'
	fi

	# ----------------------------------------------------------------
	# F I N D   A N D   I M P O R T   C O R R E C T   P O O L

	# ------------
	if [ "$ROOT" = "zfs:AUTO" ]
	then
		# Try to detect both pool and root fs.

		# If we got here, that means we don't have a hint as to
		# the root dataset, but with root=zfs:AUTO on the cmdline
		# ZFS_BOOTFS contains "zfs:AUTO" here and interferes with checks later.
		ZFS_BOOTFS=

		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Attempting to import additional pools."

		# Get a list of pools available for import.
		if [ -n "$ZFS_RPOOL" ]
		then
			# We've specified a pool - check only that.
			POOLS=$ZFS_RPOOL
		else
			POOLS=$(get_pools)
		fi

		OLD_IFS="$IFS" ; IFS=";"
		for pool in $POOLS
		do
			[ -z "$pool" ] && continue

			IFS="$OLD_IFS" import_pool "$pool"
			IFS="$OLD_IFS" find_rootfs "$pool" && break
		done
		IFS="$OLD_IFS"

		[ "$quiet" != "y" ] && zfs_log_end_msg "$ZFS_ERROR"
	else
		# No auto - use the value from the command line option.

		# Strip 'zfs:' and 'ZFS='.
		ZFS_BOOTFS="${ROOT#*[:=]}"

		# Strip everything after the first slash.
		ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
	fi

	# Import the pool (if not already done so in the AUTO check above).
	if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"

		import_pool "${ZFS_RPOOL}"
		find_rootfs "${ZFS_RPOOL}"

		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	if [ -z "${POOL_IMPORTED}" ]
	then
		# No pool imported - this is serious!
		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "No pool imported. Manually import the root pool"
		echo "at the command prompt and then exit."
		echo "Hint: Try:  zpool import -N ${ZFS_RPOOL}"
		shell
	fi

	# In case the pool was specified as a guid, resolve the guid to a name.
	pool="$("${ZPOOL}" get -H -o name,value name,guid | \
	    awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
	if [ -n "$pool" ]; then
		# If $ZFS_BOOTFS contains the guid, replace the guid portion with $pool.
		ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
			sed -e "s/$("${ZPOOL}" get -H -o value guid "$pool")/$pool/g")
		ZFS_RPOOL="${pool}"
	fi

	# ----------------------------------------------------------------
	# P R E P A R E   R O O T   F I L E S Y S T E M

	if [ -n "${ZFS_BOOTFS}" ]
	then
		# Booting from a snapshot?
		# Will overwrite the ZFS_BOOTFS variable like so:
		#   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
		echo "${ZFS_BOOTFS}" | grep -q '@' && \
		    setup_snapshot_booting "${ZFS_BOOTFS}"
	fi

	if [ -z "${ZFS_BOOTFS}" ]
	then
		# Still nothing! Let the user sort this out.
		disable_plymouth
		echo ""
		echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
		echo "       not specified on the kernel command line."
		echo ""
		echo "Manually mount the root filesystem on $rootmnt and then exit."
		echo "Hint: Try:  mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
		shell
	fi

	# ----------------------------------------------------------------
	# M O U N T   F I L E S Y S T E M S

	# * Ideally, the root filesystem would be mounted like this:
	#
	#     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
	#     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
	#
	#   but the MOUNTPOINT prefix is preserved on descendent filesystems
	#   after the pivot into the regular root, which later breaks things
	#   like `zfs mount -a` and the /proc/self/mounts refresh.
	#
	# * Mount the additional filesystems required,
	#   such as /usr, /var, /usr/local etc.
	#   NOTE: Mounted in the order specified in the
	#         ZFS_INITRD_ADDITIONAL_DATASETS variable, so take care!

	# Go through the complete list (recursively) of all filesystems below
	# the real root dataset.
	filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")"
	OLD_IFS="$IFS" ; IFS="
"
	for fs in $filesystems; do
		IFS="$OLD_IFS" mount_fs "$fs"
	done
	IFS="$OLD_IFS"
	for fs in $ZFS_INITRD_ADDITIONAL_DATASETS; do
		mount_fs "$fs"
	done

	touch /run/zfs_unlock_complete
	if [ -e /run/zfs_unlock_complete_notify ]; then
		read -r < /run/zfs_unlock_complete_notify
	fi

	# ------------
	# Debugging information.
	if [ -n "${ZFS_DEBUG}" ]
	then
		#exec 2>&1-

		echo "DEBUG: imported pools:"
		"${ZPOOL}" list -H
		echo

		echo "DEBUG: mounted ZFS filesystems:"
		mount | grep zfs
		echo

		echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
		printf "%s" "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
		read -r b

		[ "$b" = "c" ] && /bin/sh
		[ "$b" = "r" ] && reboot -f

		set +x
	fi

	# ------------
	# Run the local-bottom scripts.
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-bottom"
			run_scripts /scripts/local-bottom
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	fi
}