4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or https://opensource.org/licenses/CDDL-1.0.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
23 # Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24 # Copyright (c) 2012, 2020, Delphix. All rights reserved.
25 # Copyright (c) 2017, Tim Chase. All rights reserved.
26 # Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27 # Copyright (c) 2017, Lawrence Livermore National Security LLC.
28 # Copyright (c) 2017, Datto Inc. All rights reserved.
29 # Copyright (c) 2017, Open-E Inc. All rights reserved.
30 # Copyright (c) 2021, The FreeBSD Foundation.
31 # Use is subject to license terms.
34 . ${STF_SUITE}/include/tunables.cfg
36 . ${STF_TOOLS}/include/logapi.shlib
37 . ${STF_SUITE}/include/math.shlib
38 . ${STF_SUITE}/include/blkdev.shlib
40 # On AlmaLinux 9 we will see $PWD = '.' instead of the full path. This causes
41 # some tests to fail. Fix it up here.
42 if [ "$PWD" = "." ] ; then
43 PWD="$(readlink -f $PWD)"
47 # Apply constrained path when available. This is required since the
48 # PATH may have been modified by sudo's secure_path behavior.
50 if [ -n "$STF_PATH" ]; then
51 export PATH="$STF_PATH"
55 # Generic dot version comparison function
57 # Returns success when version $1 is greater than or equal to $2.
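#
# Illustrative usage (a sketch; the version strings are arbitrary examples):
#	compare_version_gte "2.1.14" "2.1.5" && echo "at least 2.1.5"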
59 function compare_version_gte
61 [ "$(printf "$1\n$2" | sort -V | tail -n1)" = "$1" ]
64 # Helper function used by linux_version() and freebsd_version()
65 # $1, if provided, should be a MAJOR, MAJOR.MINOR or MAJOR.MINOR.PATCH
67 function kernel_version
71 [ -z "$ver" ] && case "$UNAME" in
73 # Linux version numbers are X.Y.Z followed by optional
74 # vendor/distro specific stuff
75 # RHEL7: 3.10.0-1160.108.1.el7.x86_64
76 # Fedora 37: 6.5.12-100.fc37.x86_64
77 # Debian 12.6: 6.1.0-22-amd64
78 ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
81 # FreeBSD version numbers are X.Y-BRANCH-pZ. Depending on
82 # branch, -pZ may not be present, but this is typically only
83 # on pre-release or true .0 releases, so can be assumed 0
90 grep -Eo "[0-9]+\.[0-9]+(-[A-Z0-9]+-p[0-9]+)?" | \
91 sed -E "s/-[^-]+-p/./")
95 log_fail "Don't know how to get kernel version for '$UNAME'"
99 typeset version major minor _
100 IFS='.' read -r version major minor _ <<<"$ver"
102 [ -z "$version" ] && version=0
103 [ -z "$major" ] && major=0
104 [ -z "$minor" ] && minor=0
106 echo $((version * 100000 + major * 1000 + minor))
109 # Linux kernel version comparison function
111 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
113 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
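# Internally kernel_version() encodes X.Y.Z as X*100000 + Y*1000 + Z, so for
# example "2.6.32" compares as 206032 and "4.10" as 410000.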
114 function linux_version {
118 # FreeBSD version comparison function
120 # $1 FreeBSD version ("13.2", "14.0") or blank for installed FreeBSD version
122 # Used for comparison: if [ $(freebsd_version) -ge $(freebsd_version "13.2") ]
123 function freebsd_version {
127 # Determine if this is a Linux test system
129 # Return 0 if the platform is Linux, 1 otherwise
133 [ "$UNAME" = "Linux" ]
136 # Determine if this is an illumos test system
138 # Return 0 if the platform is illumos, 1 otherwise
141 [ "$UNAME" = "illumos" ]
144 # Determine if this is a FreeBSD test system
146 # Return 0 if the platform is FreeBSD, 1 otherwise
150 [ "$UNAME" = "FreeBSD" ]
153 # Determine if this is a 32-bit system
155 # Return 0 if the platform is 32-bit, 1 otherwise
159 [ $(getconf LONG_BIT) = "32" ]
162 # Determine if kmemleak is enabled
164 # Return 0 if kmemleak is enabled, 1 otherwise
168 is_linux && [ -e /sys/kernel/debug/kmemleak ]
171 # Determine whether a dataset is mounted
174 # $2 filesystem type; optional - defaulted to zfs
176 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
181 [[ -z $fstype ]] && fstype=zfs
186 if [[ "$1" == "/"* ]] ; then
187 ! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
189 ! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
194 mount -pt $fstype | while read dev dir _t _flags; do
195 [[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
198 out=$(df -F $fstype $1 2>/dev/null) || return
206 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
210 df -t $fstype $1 > /dev/null 2>&1
213 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
214 link=$(readlink -f $ZVOL_DEVDIR/$1)
215 [[ -n "$link" ]] && \
216 mount | grep -q "^$link" && \
226 # Return 0 if a dataset is mounted; 1 otherwise
229 # $2 filesystem type; optional - defaulted to zfs
236 # Return 0 if a dataset is unmounted; 1 otherwise
239 # $2 filesystem type; optional - defaulted to zfs
246 function default_setup
248 default_setup_noexit "$@"
253 function default_setup_no_mountpoint
255 default_setup_noexit "$1" "$2" "$3" "yes"
261 # Given a list of disks, set up storage pools and datasets.
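#
# Illustrative usage (a sketch; $DISKS is assumed to be provided by the
# test framework configuration):
#	default_setup_noexit "$DISKS"			# pool and $TESTFS only
#	default_setup_noexit "$DISKS" "" "true"	# also create $TESTVOL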
263 function default_setup_noexit
268 typeset no_mountpoint=$4
269 log_note begin default_setup_noexit
271 if is_global_zone; then
272 if poolexists $TESTPOOL ; then
273 destroy_pool $TESTPOOL
275 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
276 log_must zpool create -f $TESTPOOL $disklist
281 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
282 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
284 log_must zfs create $TESTPOOL/$TESTFS
285 if [[ -z $no_mountpoint ]]; then
286 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
289 if [[ -n $container ]]; then
290 rm -rf $TESTDIR1 || \
291 log_unresolved Could not remove $TESTDIR1
292 mkdir -p $TESTDIR1 || \
293 log_unresolved Could not create $TESTDIR1
295 log_must zfs create $TESTPOOL/$TESTCTR
296 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
297 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
298 if [[ -z $no_mountpoint ]]; then
299 log_must zfs set mountpoint=$TESTDIR1 \
300 $TESTPOOL/$TESTCTR/$TESTFS1
304 if [[ -n $volume ]]; then
305 if is_global_zone ; then
306 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
309 log_must zfs create $TESTPOOL/$TESTVOL
315 # Given a list of disks, set up a storage pool, file system and
318 function default_container_setup
322 default_setup "$disklist" "true"
326 # Given a list of disks, set up a storage pool, file system
329 function default_volume_setup
333 default_setup "$disklist" "" "true"
337 # Given a list of disks, set up a storage pool, file system,
338 # a container and a volume.
340 function default_container_volume_setup
344 default_setup "$disklist" "true" "true"
348 # Create a snapshot on a filesystem or volume. By default create a snapshot on
351 # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
352 # $2 snapshot name. Default, $TESTSNAP
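#
# Illustrative usage (the snapshot name is an arbitrary example):
#	create_snapshot "$TESTPOOL/$TESTFS" "before_change"
#	# -> creates $TESTPOOL/$TESTFS@before_change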
354 function create_snapshot
356 typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
357 typeset snap=${2:-$TESTSNAP}
359 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
360 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
362 if snapexists $fs_vol@$snap; then
363 log_fail "$fs_vol@$snap already exists."
365 datasetexists $fs_vol || \
366 log_fail "$fs_vol must exist."
368 log_must zfs snapshot $fs_vol@$snap
372 # Create a clone from a snapshot, default clone name is $TESTCLONE.
374 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
375 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
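#
# Illustrative usage (the clone name is an arbitrary example):
#	create_clone "$TESTPOOL/$TESTFS@$TESTSNAP" "$TESTPOOL/clone_of_testfs"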
377 function create_clone # snapshot clone
379 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
380 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
383 log_fail "Snapshot name is undefined."
385 log_fail "Clone name is undefined."
387 log_must zfs clone $snap $clone
391 # Create a bookmark of the given snapshot. By default create a bookmark on
394 # $1 Existing filesystem or volume name. Default, $TESTFS
395 # $2 Existing snapshot name. Default, $TESTSNAP
396 # $3 bookmark name. Default, $TESTBKMARK
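#
# Illustrative usage (the bookmark name is an arbitrary example):
#	create_bookmark "$TESTPOOL/$TESTFS" "$TESTSNAP" "mark1"
#	# -> creates $TESTPOOL/$TESTFS#mark1 from $TESTPOOL/$TESTFS@$TESTSNAP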
398 function create_bookmark
400 typeset fs_vol=${1:-$TESTFS}
401 typeset snap=${2:-$TESTSNAP}
402 typeset bkmark=${3:-$TESTBKMARK}
404 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
405 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
406 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
408 if bkmarkexists $fs_vol#$bkmark; then
409 log_fail "$fs_vol#$bkmark already exists."
411 datasetexists $fs_vol || \
412 log_fail "$fs_vol must exist."
413 snapexists $fs_vol@$snap || \
414 log_fail "$fs_vol@$snap must exist."
416 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
420 # Create the temporary clone that results from an interrupted resumable 'zfs receive'
421 # $1 Destination filesystem name. Must not exist, will be created as the result
422 # of this function along with its %recv temporary clone
423 # $2 Source filesystem name. Must not exist, will be created and destroyed
425 function create_recv_clone
428 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
429 typeset snap="$sendfs@snap1"
430 typeset incr="$sendfs@snap2"
431 typeset mountpoint="$TESTDIR/create_recv_clone"
432 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
434 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
436 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
437 datasetexists $sendfs && log_fail "Send filesystem must not exist."
439 log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
440 log_must zfs snapshot $snap
441 log_must eval "zfs send $snap | zfs recv -u $recvfs"
442 log_must mkfile 1m "$mountpoint/data"
443 log_must zfs snapshot $incr
444 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
445 iflag=fullblock > $sendfile"
446 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
447 destroy_dataset "$sendfs" "-r"
448 log_must rm -f "$sendfile"
450 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
451 log_fail "Error creating temporary $recvfs/%recv clone"
455 function default_mirror_setup
457 default_mirror_setup_noexit $1 $2 $3
463 # Given a pair of disks, set up a storage pool and dataset for the mirror
464 # @parameters: $1 the primary side of the mirror
465 # $2 the secondary side of the mirror
466 # @uses: ZPOOL ZFS TESTPOOL TESTFS
467 function default_mirror_setup_noexit
469 readonly func="default_mirror_setup_noexit"
473 [[ -z $primary ]] && \
474 log_fail "$func: No parameters passed"
475 [[ -z $secondary ]] && \
476 log_fail "$func: No secondary partition passed"
477 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
478 log_must zpool create -f $TESTPOOL mirror $@
479 log_must zfs create $TESTPOOL/$TESTFS
480 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
484 # Destroy the configured testpool mirrors.
485 # the mirrors are of the form ${TESTPOOL}{number}
486 # @uses: ZPOOL ZFS TESTPOOL
487 function destroy_mirrors
489 default_cleanup_noexit
494 function default_raidz_setup
496 default_raidz_setup_noexit "$*"
502 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
503 # $1 the list of disks
505 function default_raidz_setup_noexit
507 typeset disklist="$*"
508 disks=(${disklist[*]})
510 if [[ ${#disks[*]} -lt 2 ]]; then
511 log_fail "A raid-z requires a minimum of two disks."
514 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
515 log_must zpool create -f $TESTPOOL raidz $disklist
516 log_must zfs create $TESTPOOL/$TESTFS
517 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
521 # Common function used to clean up storage pools and datasets.
523 # Invoked at the start of the test suite to ensure the system
524 # is in a known state, and also at the end of each set of
525 # sub-tests to ensure errors from one set of tests don't
526 # impact the execution of the next set.
528 function default_cleanup
530 default_cleanup_noexit
536 # Utility function used to list all available pool names.
538 # NOTE: $KEEP is a variable containing pool names, separated by a newline
539 # character, that must be excluded from the returned list.
541 function get_all_pools
543 zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
546 function default_cleanup_noexit
550 # Destroying the pool will also destroy any
551 # filesystems it contains.
553 if is_global_zone; then
554 zfs unmount -a > /dev/null 2>&1
555 ALL_POOLS=$(get_all_pools)
556 # Here, we loop through the pools we're allowed to
557 # destroy, only destroying them if it's safe to do
559 while [ ! -z ${ALL_POOLS} ]
561 for pool in ${ALL_POOLS}
563 if safe_to_destroy_pool $pool ;
568 ALL_POOLS=$(get_all_pools)
574 for fs in $(zfs list -H -o name \
575 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
576 destroy_dataset "$fs" "-Rf"
579 # Clean up here to avoid leaving garbage directories behind.
580 for fs in $(zfs list -H -o name); do
581 [[ $fs == /$ZONE_POOL ]] && continue
582 [[ -d $fs ]] && log_must rm -rf $fs/*
586 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
589 for fs in $(zfs list -H -o name); do
590 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
591 log_must zfs set reservation=none $fs
592 log_must zfs set recordsize=128K $fs
593 log_must zfs set mountpoint=/$fs $fs
594 typeset enc=$(get_prop encryption $fs)
595 if [ -z "$enc" ] || [ "$enc" = "off" ]; then
596 log_must zfs set checksum=on $fs
598 log_must zfs set compression=off $fs
599 log_must zfs set atime=on $fs
600 log_must zfs set devices=off $fs
601 log_must zfs set exec=on $fs
602 log_must zfs set setuid=on $fs
603 log_must zfs set readonly=off $fs
604 log_must zfs set snapdir=hidden $fs
605 log_must zfs set aclmode=groupmask $fs
606 log_must zfs set aclinherit=secure $fs
611 [[ -d $TESTDIR ]] && \
612 log_must rm -rf $TESTDIR
615 if is_mpath_device $disk1; then
619 rm -f $TEST_BASE_DIR/{err,out}
624 # Common function used to clean up storage pools, file systems
627 function default_container_cleanup
629 if ! is_global_zone; then
633 ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
634 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
636 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
637 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
639 [[ -e $TESTDIR1 ]] && \
640 log_must rm -rf $TESTDIR1
646 # Common function used to clean up a snapshot of a file system or volume.
647 # Defaults to deleting the file system's snapshot.
651 function destroy_snapshot
653 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
655 if ! snapexists $snap; then
656 log_fail "'$snap' does not exist."
660 # The value returned by 'get_prop' may not match the real mountpoint when
661 # the snapshot is unmounted, so first check and make sure this snapshot
662 # is actually mounted on the current system.
665 if ismounted $snap; then
666 mtpt=$(get_prop mountpoint $snap)
669 destroy_dataset "$snap"
670 [[ $mtpt != "" && -d $mtpt ]] && \
671 log_must rm -rf $mtpt
675 # Common function used to clean up a clone.
679 function destroy_clone
681 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
683 if ! datasetexists $clone; then
684 log_fail "'$clone' does not exist."
687 # For the same reason as in destroy_snapshot
689 if ismounted $clone; then
690 mtpt=$(get_prop mountpoint $clone)
693 destroy_dataset "$clone"
694 [[ $mtpt != "" && -d $mtpt ]] && \
695 log_must rm -rf $mtpt
699 # Common function used to clean up a bookmark of a file system or volume.
700 # Defaults to deleting the file system's bookmark.
704 function destroy_bookmark
706 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
708 if ! bkmarkexists $bkmark; then
709 log_fail "'$bkmark' does not exist."
712 destroy_dataset "$bkmark"
715 # Return 0 if a snapshot exists; $? otherwise
721 zfs list -H -t snapshot "$1" > /dev/null 2>&1
725 # Return 0 if a bookmark exists; $? otherwise
729 function bkmarkexists
731 zfs list -H -t bookmark "$1" > /dev/null 2>&1
735 # Return 0 if a hold exists; $? otherwise
742 ! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
746 # Set a property to a certain value on a dataset.
747 # Sets a property of the dataset to the value as passed in.
749 # $1 dataset whose property is being set
751 # $3 value to set property to
753 # 0 if the property could be set.
754 # non-zero otherwise.
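#
# Illustrative usage (property and value are arbitrary examples):
#	dataset_setprop "$TESTPOOL/$TESTFS" compression lz4 ||
#	    log_fail "unable to set compression"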
757 function dataset_setprop
759 typeset fn=dataset_setprop
762 log_note "$fn: Insufficient parameters (need 3, had $#)"
766 output=$(zfs set $2=$3 $1 2>&1)
769 log_note "Setting property on $1 failed."
770 log_note "property $2=$3"
771 log_note "Return Code: $rv"
772 log_note "Output: $output"
779 # Check a numeric assertion
780 # @parameter: $@ the assertion to check
781 # @output: big loud notice if assertion failed
786 (($@)) || log_fail "$@"
790 # Function to format partition size of a disk
791 # Given a disk cxtxdx reduces all partitions
794 function zero_partitions #<whole_disk_name>
800 gpart destroy -F $diskname
802 DSK=$DEV_DSKDIR/$diskname
803 DSK=$(echo $DSK | sed -e "s|//|/|g")
804 log_must parted $DSK -s -- mklabel gpt
805 blockdev --rereadpt $DSK 2>/dev/null
808 for i in 0 1 3 4 5 6 7
810 log_must set_partition $i "" 0mb $diskname
818 # Given a slice, size and disk, this function
819 # formats the slice to the specified size.
820 # Size should be specified with units as per
821 # the `format` command requirements, e.g. 100mb, 3gb
823 # NOTE: This entire interface is problematic for the Linux parted utility
824 # which requires the end of the partition to be specified. It would be
825 # best to retire this interface and replace it with something more flexible.
826 # At the moment a best effort is made.
828 # arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
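#
# Illustrative usage (a sketch; "sdb" is an arbitrary disk name and the sizes
# are examples; see partition_disk() below for the canonical caller):
#	set_partition 0 "" 100mb sdb
#	set_partition 1 "$(get_endslice sdb 0)" 100mb sdb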
829 function set_partition
831 typeset -i slicenum=$1
834 typeset disk=${4#$DEV_DSKDIR/}
835 disk=${disk#$DEV_RDSKDIR/}
839 if [[ -z $size || -z $disk ]]; then
840 log_fail "The size or disk name is unspecified."
842 disk=$DEV_DSKDIR/$disk
843 typeset size_mb=${size%%[mMgG]}
845 size_mb=${size_mb%%[mMgG][bB]}
846 if [[ ${size:1:1} == 'g' ]]; then
847 ((size_mb = size_mb * 1024))
850 # Create GPT partition table when setting slice 0 or
851 # when the device doesn't already contain a GPT label.
852 parted $disk -s -- print 1 >/dev/null
854 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
855 if ! parted $disk -s -- mklabel gpt; then
856 log_note "Failed to create GPT partition table on $disk"
861 # When no start is given align on the first cylinder.
862 if [[ -z "$start" ]]; then
866 # Determine the cylinder size for the device and using
867 # that calculate the end offset in cylinders.
868 typeset -i cly_size_kb=0
869 cly_size_kb=$(parted -m $disk -s -- unit cyl print |
870 awk -F '[:k.]' 'NR == 3 {print $4}')
871 ((end = (size_mb * 1024 / cly_size_kb) + start))
874 mkpart part$slicenum ${start}cyl ${end}cyl
876 if [[ $ret_val -ne 0 ]]; then
877 log_note "Failed to create partition $slicenum on $disk"
881 blockdev --rereadpt $disk 2>/dev/null
882 block_device_wait $disk
885 if [[ -z $size || -z $disk ]]; then
886 log_fail "The size or disk name is unspecified."
888 disk=$DEV_DSKDIR/$disk
890 if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
891 gpart destroy -F $disk >/dev/null 2>&1
892 if ! gpart create -s GPT $disk; then
893 log_note "Failed to create GPT partition table on $disk"
898 typeset index=$((slicenum + 1))
900 if [[ -n $start ]]; then
903 gpart add -t freebsd-zfs $start -s $size -i $index $disk
904 if [[ $ret_val -ne 0 ]]; then
905 log_note "Failed to create partition $slicenum on $disk"
909 block_device_wait $disk
912 if [[ -z $slicenum || -z $size || -z $disk ]]; then
913 log_fail "The slice, size or disk name is unspecified."
916 typeset format_file=/var/tmp/format_in.$$
918 echo "partition" >$format_file
919 echo "$slicenum" >> $format_file
920 echo "" >> $format_file
921 echo "" >> $format_file
922 echo "$start" >> $format_file
923 echo "$size" >> $format_file
924 echo "label" >> $format_file
925 echo "" >> $format_file
926 echo "q" >> $format_file
927 echo "q" >> $format_file
929 format -e -s -d $disk -f $format_file
935 if [[ $ret_val -ne 0 ]]; then
936 log_note "Unable to format $disk slice $slicenum to $size"
943 # Delete all partitions on all disks - this is specifically for the use of multipath
944 # devices which currently can only be used in the test suite as raw/un-partitioned
945 # devices (i.e. a zpool cannot be created on a whole mpath device that has partitions)
947 function delete_partitions
951 if [[ -z $DISKSARRAY ]]; then
957 for disk in $DISKSARRAY; do
958 for (( part = 1; part < MAX_PARTITIONS; part++ )); do
959 typeset partition=${disk}${SLICE_PREFIX}${part}
960 parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
961 if lsblk | grep -qF ${partition}; then
962 log_fail "Partition ${partition} not deleted"
964 log_note "Partition ${partition} deleted"
968 elif is_freebsd; then
969 for disk in $DISKSARRAY; do
970 if gpart destroy -F $disk; then
971 log_note "Partitions for ${disk} deleted"
973 log_fail "Partitions for ${disk} not deleted"
980 # Get the end cyl of the given slice
982 function get_endslice #<disk> <slice>
986 if [[ -z $disk || -z $slice ]] ; then
987 log_fail "The disk name or slice number is unspecified."
992 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
993 awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
994 ((endcyl = (endcyl + 1)))
997 disk=${disk#/dev/zvol/}
1000 endcyl=$(gpart show $disk | \
1001 awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1004 disk=${disk#/dev/dsk/}
1005 disk=${disk#/dev/rdsk/}
1009 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1010 awk '/sectors\/cylinder/ {print $2}')
1012 if ((ratio == 0)); then
1016 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1017 awk -v token="$slice" '$1 == token {print $6}')
1019 ((endcyl = (endcyl + 1) / ratio))
1028 # Given a size, disk and total slice number, this function formats the
1029 # disk slices from 0 to the total slice number with the same specified
1032 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1035 typeset slice_size=$1
1036 typeset disk_name=$2
1037 typeset total_slices=$3
1040 zero_partitions $disk_name
1041 while ((i < $total_slices)); do
1048 log_must set_partition $i "$cyl" $slice_size $disk_name
1049 cyl=$(get_endslice $disk_name $i)
1055 # This function continues to write to a filenum number of files into dirnum
1056 # number of directories until either file_write returns an error or the
1057 # maximum number of files per directory has been written.
1060 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1062 # Return value: 0 on success
1066 # destdir: the directory under which everything is to be created
1067 # dirnum: the maximum number of subdirectories to use, -1 no limit
1068 # filenum: the maximum number of files per subdirectory
1069 # bytes: number of bytes to write
1070 # num_writes: number of times to write out bytes
1071 # data: the data that will be written
1074 # fill_fs /testdir 20 25 1024 256 0
1076 # Note: bytes * num_writes equals the size of the testfile
1078 function fill_fs # destdir dirnum filenum bytes num_writes data
1080 typeset destdir=${1:-$TESTDIR}
1081 typeset -i dirnum=${2:-50}
1082 typeset -i filenum=${3:-50}
1083 typeset -i bytes=${4:-8192}
1084 typeset -i num_writes=${5:-10240}
1085 typeset data=${6:-0}
1087 mkdir -p $destdir/{1..$dirnum}
1088 for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1089 file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1094 # Get the specified dataset property in parsable format or fail
1095 function get_prop # property dataset
1100 zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset"
1103 # Get the specified pool property in parsable format or fail
1104 function get_pool_prop # property pool
1109 zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
1112 # Return 0 if a pool exists; $? otherwise
1120 if [[ -z $pool ]]; then
1121 log_note "No pool name given."
1125 zpool get name "$pool" > /dev/null 2>&1
1128 # Return 0 if all the specified datasets exist; $? otherwise
1131 function datasetexists
1133 if (($# == 0)); then
1134 log_note "No dataset name given."
1138 zfs get name "$@" > /dev/null 2>&1
1141 # return 0 if none of the specified datasets exists, otherwise return 1.
1144 function datasetnonexists
1146 if (($# == 0)); then
1147 log_note "No dataset name given."
1151 while (($# > 0)); do
1152 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1160 # FreeBSD breaks exports(5) at whitespace and doesn't process escapes
1161 # Solaris just breaks
1163 # cf. https://github.com/openzfs/zfs/pull/13165#issuecomment-1059845807
1165 # Linux can have spaces (which are \OOO-escaped),
1166 # but can't have backslashes because they're parsed recursively
1167 function shares_can_have_whitespace
1172 function is_shared_freebsd
1176 pgrep -q mountd && showmount -E | grep -qx "$fs"
1179 function is_shared_illumos
1184 for mtpt in `share | awk '{print $2}'` ; do
1185 if [[ $mtpt == $fs ]] ; then
1190 typeset stat=$(svcs -H -o STA nfs/server:default)
1191 if [[ $stat != "ON" ]]; then
1192 log_note "Current nfs/server status: $stat"
1198 function is_shared_linux
1201 ! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
1205 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1207 # Returns 0 if shared, 1 otherwise.
1214 if [[ $fs != "/"* ]] ; then
1215 if datasetnonexists "$fs" ; then
1218 mtpt=$(get_prop mountpoint "$fs")
1220 none|legacy|-) return 1
1229 FreeBSD) is_shared_freebsd "$fs" ;;
1230 Linux) is_shared_linux "$fs" ;;
1231 *) is_shared_illumos "$fs" ;;
1235 function is_exported_illumos
1240 while read -r mtpt _; do
1241 [ "$mtpt" = "$fs" ] && return
1242 done < /etc/dfs/sharetab
1247 function is_exported_freebsd
1252 while read -r mtpt _; do
1253 [ "$mtpt" = "$fs" ] && return
1254 done < /etc/zfs/exports
1259 function is_exported_linux
1264 while read -r mtpt _; do
1265 [ "$(printf "$mtpt")" = "$fs" ] && return
1266 done < /etc/exports.d/zfs.exports
1272 # Given a mountpoint, or a dataset name, determine if it is exported via
1273 # the os-specific NFS exports file.
1275 # Returns 0 if exported, 1 otherwise.
1277 function is_exported
1282 if [[ $fs != "/"* ]] ; then
1283 if datasetnonexists "$fs" ; then
1286 mtpt=$(get_prop mountpoint "$fs")
1288 none|legacy|-) return 1
1297 FreeBSD) is_exported_freebsd "$fs" ;;
1298 Linux) is_exported_linux "$fs" ;;
1299 *) is_exported_illumos "$fs" ;;
1304 # Given a dataset name determine if it is shared via SMB.
1306 # Returns 0 if shared, 1 otherwise.
1308 function is_shared_smb
1312 datasetexists "$fs" || return
1315 net usershare list | grep -xFq "${fs//[-\/]/_}"
1317 log_note "SMB on $UNAME currently unsupported by the test framework"
1323 # Given a mountpoint, determine if it is not shared via NFS.
1325 # Returns 0 if not shared, 1 otherwise.
1333 # Given a dataset determine if it is not shared via SMB.
1335 # Returns 0 if not shared, 1 otherwise.
1337 function not_shared_smb
1343 # Helper function to unshare a mountpoint.
1345 function unshare_fs #fs
1349 if is_shared $fs || is_shared_smb $fs; then
1350 log_must zfs unshare $fs
1355 # Helper function to share a NFS mountpoint.
1357 function share_nfs #fs
1361 is_shared "$fs" && return
1365 log_must exportfs "*:$fs"
1369 read -r mountd < /var/run/mountd.pid
1370 log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
1371 log_must kill -s HUP "$mountd"
1374 log_must share -F nfs "$fs"
1382 # Helper function to unshare a NFS mountpoint.
1384 function unshare_nfs #fs
1388 ! is_shared "$fs" && return
1392 log_must exportfs -u "*:$fs"
1396 read -r mountd < /var/run/mountd.pid
1397 awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
1398 log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
1399 log_must kill -s HUP "$mountd"
1402 log_must unshare -F nfs $fs
1410 # Helper function to show NFS shares.
1412 function showshares_nfs
1437 log_unsupported "Unknown platform"
1439 esac || log_unsupported "The NFS utilities are not installed"
1443 # Check NFS server status and trigger it online.
1445 function setup_nfs_server
1447 # Cannot share directory in non-global zone.
1449 if ! is_global_zone; then
1450 log_note "Cannot trigger NFS server by sharing in LZ."
1456 # Re-synchronize /var/lib/nfs/etab with /etc/exports and
1457 # /etc/exports.d/* to provide a clean test environment.
1459 log_must exportfs -r
1461 log_note "NFS server must be started prior to running ZTS."
1463 elif is_freebsd; then
1464 log_must kill -s HUP $(</var/run/mountd.pid)
1466 log_note "NFS server must be started prior to running ZTS."
1470 typeset nfs_fmri="svc:/network/nfs/server:default"
1471 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1473 # Only a real sharing operation can bring the NFS server
1474 # online permanently.
1476 typeset dummy=/tmp/dummy
1478 if [[ -d $dummy ]]; then
1479 log_must rm -rf $dummy
1482 log_must mkdir $dummy
1483 log_must share $dummy
1486 # Wait for the fmri's status to reach its final state. While still in
1487 # transition an asterisk (*) is appended to the instance status, and
1488 # unsharing at that point would revert the status to 'DIS' again.
1490 # Wait for at least 1 second.
1494 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1501 log_must unshare $dummy
1502 log_must rm -rf $dummy
1505 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1509 # Verify whether the calling process is in the global zone
1511 # Return 0 if in global zone, 1 in non-global zone
1513 function is_global_zone
1515 if is_linux || is_freebsd; then
1518 typeset cur_zone=$(zonename 2>/dev/null)
1519 [ $cur_zone = "global" ]
1524 # Verify whether test is permitted to run from
1525 # global zone, local zone, or both
1527 # $1 zone limit, could be "global", "local", or "both"(no limit)
1529 # Return 0 if permitted, otherwise exit with log_unsupported
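#
# Illustrative usage (typically the first check in a test script):
#	verify_runnable "global"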
1531 function verify_runnable # zone limit
1535 [[ -z $limit ]] && return 0
1537 if is_global_zone ; then
1541 local) log_unsupported "Test is unable to run from "\
1544 *) log_note "Warning: unknown limit $limit - " \
1552 global) log_unsupported "Test is unable to run from "\
1555 *) log_note "Warning: unknown limit $limit - " \
1566 # Return 0 if created successfully or the pool already exists; $? otherwise
1567 # Note: In local zones, this function should return 0 silently.
1570 # $2-n - [keyword] devs_list
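#
# Illustrative usage ($DISK1 and $DISK2 are arbitrary example variables):
#	create_pool "$TESTPOOL" mirror "$DISK1" "$DISK2"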
1572 function create_pool #pool devs_list
1574 typeset pool=${1%%/*}
1578 if [[ -z $pool ]]; then
1579 log_note "Missing pool name."
1583 if poolexists $pool ; then
1587 if is_global_zone ; then
1588 [[ -d /$pool ]] && rm -rf /$pool
1589 log_must zpool create -f $pool $@
1595 # Return 0 if the pool is destroyed successfully; $? otherwise
1596 # Note: In local zones, this function should return 0 silently.
1599 # Destroy pool with the given parameters.
1601 function destroy_pool #pool
1603 typeset pool=${1%%/*}
1606 if [[ -z $pool ]]; then
1607 log_note "No pool name given."
1611 if is_global_zone ; then
1612 if poolexists "$pool" ; then
1613 mtpt=$(get_prop mountpoint "$pool")
1615 # At times, syseventd/udev activity can cause attempts
1616 # to destroy a pool to fail with EBUSY. We retry a few
1617 # times allowing failures before requiring the destroy
1619 log_must_busy zpool destroy -f $pool
1622 log_must rm -rf $mtpt
1624 log_note "Pool does not exist. ($pool)"
1632 # Return 0 if created successfully; $? otherwise
1635 # $2-n - dataset options
1637 function create_dataset #dataset dataset_options
1643 if [[ -z $dataset ]]; then
1644 log_note "Missing dataset name."
1648 if datasetexists $dataset ; then
1649 destroy_dataset $dataset
1652 log_must zfs create $@ $dataset
1657 # Return 0 if the dataset is destroyed successfully; $? otherwise
1658 # Note: In local zones, this function should return 0 silently.
1661 # $2 - custom arguments for zfs destroy
1662 # Destroy dataset with the given parameters.
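#
# Illustrative usage (mirrors existing callers in this file):
#	destroy_dataset "$TESTPOOL/$TESTFS" "-r"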
1664 function destroy_dataset # dataset [args]
1668 typeset args=${2:-""}
1670 if [[ -z $dataset ]]; then
1671 log_note "No dataset name given."
1675 if is_global_zone ; then
1676 if datasetexists "$dataset" ; then
1677 mtpt=$(get_prop mountpoint "$dataset")
1678 log_must_busy zfs destroy $args $dataset
1680 [ -d $mtpt ] && log_must rm -rf $mtpt
1682 log_note "Dataset does not exist. ($dataset)"
1691 # Reexport TESTPOOL & TESTPOOL(1-4)
1693 function reexport_pool
1698 while ((i < cntctr)); do
1700 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1701 if ! ismounted $TESTPOOL; then
1702 log_must zfs mount $TESTPOOL
1705 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1706 if eval ! ismounted \$TESTPOOL$i; then
1707 log_must eval zfs mount \$TESTPOOL$i
1715 # Verify a given disk or pool state
1717 # Return 0 if pool/disk matches expected state, 1 otherwise
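#
# Illustrative usage ($DISK1 is an arbitrary example variable):
#	check_state $TESTPOOL "$DISK1" "online" || log_fail "$DISK1 is not online"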
1719 function check_state # pool disk state{online,offline,degraded}
1722 typeset disk=${2#$DEV_DSKDIR/}
1725 [[ -z $pool ]] || [[ -z $state ]] \
1726 && log_fail "Arguments invalid or missing"
1728 if [[ -z $disk ]]; then
1729 #check pool state only
1730 zpool get -H -o value health $pool | grep -qi "$state"
1732 zpool status -v $pool | grep "$disk" | grep -qi "$state"
1737 # Get the mountpoint of snapshot
1738 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1741 function snapshot_mountpoint
1743 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1745 if [[ $dataset != *@* ]]; then
1746 log_fail "Invalid snapshot name '$dataset'."
1749 typeset fs=${dataset%@*}
1750 typeset snap=${dataset#*@}
1752 if [[ -z $fs || -z $snap ]]; then
1753 log_fail "Invalid snapshot name '$dataset'."
1756 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1760 # Given a device and 'ashift' value verify it's correctly set on every label
1762 function verify_ashift # device ashift
1767 zdb -e -lll $device | awk -v ashift=$ashift '
1780 # Given a pool and file system, this function will verify the file system
1781 # using the zdb internal tool. Note that the pool is exported and imported
1782 # to ensure it has consistent state.
1784 function verify_filesys # pool filesystem dir
1787 typeset filesys="$2"
1788 typeset zdbout="/tmp/zdbout.$$"
1793 typeset search_path=""
1795 log_note "Calling zdb to verify filesystem '$filesys'"
1796 zfs unmount -a > /dev/null 2>&1
1797 log_must zpool export $pool
1799 if [[ -n $dirs ]] ; then
1800 for dir in $dirs ; do
1801 search_path="$search_path -d $dir"
1805 log_must zpool import $search_path $pool
1807 if ! zdb -cudi $filesys > $zdbout 2>&1; then
1808 log_note "Output: zdb -cudi $filesys"
1811 log_fail "zdb detected errors with: '$filesys'"
1814 log_must zfs mount -a
1815 log_must rm -rf $zdbout
1819 # Given a pool issue a scrub and verify that no checksum errors are reported.
1821 function verify_pool
1823 typeset pool=${1:-$TESTPOOL}
1825 log_must zpool scrub $pool
1826 log_must wait_scrubbed $pool
1828 typeset -i cksum=$(zpool status $pool | awk '
1830 isvdev { errors += $NF }
1831 /CKSUM$/ { isvdev = 1 }
1832 END { print errors }
1834 if [[ $cksum != 0 ]]; then
1835 log_must zpool status -v
1836 log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
1841 # Given a pool, this function lists all disks in the pool
1843 function get_disklist # pool
1845 echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
1846 grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
1850 # Given a pool, this function lists all disks in the pool with their full
1851 # path (like "/dev/sda" instead of "sda").
1853 function get_disklist_fullpath # pool
1855 get_disklist "-P $1"
1861 # This function kills a given list of processes after a time period. We use
1862 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1863 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1864 # would be listed as FAIL, which we don't want: we're happy with stress tests
1865 # running for a certain amount of time, then finishing.
1867 # @param $1 the time in seconds after which we should terminate these processes
1868 # @param $2..$n the processes we wish to terminate.
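#
# Illustrative usage (a sketch; $pid1 and $pid2 are PIDs of previously
# backgrounded worker processes):
#	stress_timeout 300 $pid1 $pid2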
1870 function stress_timeout
1872 typeset -i TIMEOUT=$1
1876 log_note "Waiting for child processes($cpids). " \
1877 "It could last dozens of minutes, please be patient ..."
1878 log_must sleep $TIMEOUT
1880 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1882 for pid in $cpids; do
1883 ps -p $pid > /dev/null 2>&1 &&
1884 log_must kill -USR1 $pid
1889 # Verify a given hotspare disk is inuse or avail
1891 # Return 0 if pool/disk matches expected state, 1 otherwise
1893 function check_hotspare_state # pool disk state{inuse,avail}
1896 typeset disk=${2#$DEV_DSKDIR/}
1899 cur_state=$(get_device_state $pool $disk "spares")
1901 [ $state = $cur_state ]
1905 # Wait until a hotspare transitions to a given state or times out.
1907 # Return 0 when pool/disk matches expected state, 1 on timeout.
1909 function wait_hotspare_state # pool disk state timeout
1912 typeset disk=${2#*$DEV_DSKDIR/}
1914 typeset timeout=${4:-60}
1917 while [[ $i -lt $timeout ]]; do
1918 if check_hotspare_state $pool $disk $state; then
1930 # Verify a given vdev disk is inuse or avail
1932 # Return 0 if pool/disk matches expected state, 1 otherwise
1934 function check_vdev_state # pool disk state{online,offline,unavail,removed}
1937 typeset disk=${2#*$DEV_DSKDIR/}
1940 cur_state=$(get_device_state $pool $disk)
1942 [ $state = $cur_state ]
1946 # Wait until a vdev transitions to a given state or times out.
1948 # Return 0 when pool/disk matches expected state, 1 on timeout.
1950 function wait_vdev_state # pool disk state timeout
1953 typeset disk=${2#*$DEV_DSKDIR/}
1955 typeset timeout=${4:-60}
1958 while [[ $i -lt $timeout ]]; do
1959 if check_vdev_state $pool $disk $state; then
1971 # Check the output of 'zpool status -v <pool>',
1972 # and see if the content of <token> contains the <keyword> specified.
1974 # Return 0 if it does, 1 otherwise
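#
# Illustrative usage (token and keyword mirror the helper functions below):
#	check_pool_status "$TESTPOOL" "scan" "scrub repaired"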
1976 function check_pool_status # pool token keyword <verbose>
1981 typeset verbose=${4:-false}
1983 scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
1984 if [[ $verbose == true ]]; then
1987 echo $scan | grep -qi "$keyword"
1991 # The following functions are instance of check_pool_status()
1992 # is_pool_resilvering - to check if the pool resilver is in progress
1993 # is_pool_resilvered - to check if the pool resilver is completed
1994 # is_pool_scrubbing - to check if the pool scrub is in progress
1995 # is_pool_scrubbed - to check if the pool scrub is completed
1996 # is_pool_scrub_stopped - to check if the pool scrub is stopped
1997 # is_pool_scrub_paused - to check if the pool scrub has paused
1998 # is_pool_removing - to check if the pool is removing a vdev
1999 # is_pool_removed - to check if the pool remove is completed
2000 # is_pool_discarding - to check if the pool checkpoint is being discarded
2001 # is_pool_replacing - to check if the pool is performing a replacement
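#
# Illustrative usage of the helpers above (a sketch):
#	if is_pool_scrubbing $TESTPOOL; then
#		log_note "scrub still in progress on $TESTPOOL"
#	fi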
2003 function is_pool_resilvering #pool <verbose>
2005 check_pool_status "$1" "scan" \
2006 "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
2009 function is_pool_resilvered #pool <verbose>
2011 check_pool_status "$1" "scan" "resilvered " $2
2014 function is_pool_scrubbing #pool <verbose>
2016 check_pool_status "$1" "scan" "scrub in progress since " $2
2019 function is_pool_error_scrubbing #pool <verbose>
2021 check_pool_status "$1" "scrub" "error scrub in progress since " $2
2025 function is_pool_scrubbed #pool <verbose>
2027 check_pool_status "$1" "scan" "scrub repaired" $2
2030 function is_pool_scrub_stopped #pool <verbose>
2032 check_pool_status "$1" "scan" "scrub canceled" $2
2035 function is_pool_error_scrub_stopped #pool <verbose>
2037 check_pool_status "$1" "scrub" "error scrub canceled on " $2
2041 function is_pool_scrub_paused #pool <verbose>
2043 check_pool_status "$1" "scan" "scrub paused since " $2
2046 function is_pool_error_scrub_paused #pool <verbose>
2048 check_pool_status "$1" "scrub" "error scrub paused since " $2
2052 function is_pool_removing #pool
2054 check_pool_status "$1" "remove" "in progress since "
2057 function is_pool_removed #pool
2059 check_pool_status "$1" "remove" "completed on"
2062 function is_pool_discarding #pool
2064 check_pool_status "$1" "checkpoint" "discarding"
2066 function is_pool_replacing #pool
2068 zpool status "$1" | grep -qE 'replacing-[0-9]+'
2071 function wait_for_degraded
2074 typeset timeout=${2:-30}
2078 [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2079 log_note "$pool is not yet degraded."
2081 if ((SECONDS - t0 > $timeout)); then
2082 log_note "$pool not degraded after $timeout seconds."
2091 # Use create_pool()/destroy_pool() to clean up the information
2092 # on the given disks to avoid slice overlapping.
2094 function cleanup_devices #vdevs
2096 typeset pool="foopool$$"
2099 zero_partitions $vdev
2102 poolexists $pool && destroy_pool $pool
2103 create_pool $pool $@
2110 # A function to find free disks on a system or from the given
2111 # disks as the parameter. It works by locating disks that are in use
2112 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2114 # $@ given disks to find which are free, default is all disks in
2117 # @return a string containing the list of available disks
2121 # Trust provided list, no attempt is made to locate unused devices.
2122 if is_linux || is_freebsd; then
2128 sfi=/tmp/swaplist.$$
2129 dmpi=/tmp/dumpdev.$$
2130 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2133 dumpadm > $dmpi 2>/dev/null
2135 disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
2143 if (searchdisks && $2 !~ "^$"){
2149 /^AVAILABLE DISK SELECTIONS:/{
2155 for disk in $disks; do
2157 grep -q "${disk}[sp]" /etc/mnttab && continue
2159 grep -q "${disk}[sp]" $sfi && continue
2160 # check for dump device
2161 grep -q "${disk}[sp]" $dmpi && continue
2162 # check to see if this disk hasn't been explicitly excluded
2163 # by a user-set environment variable
2164 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
2165 unused_candidates="$unused_candidates $disk"
2169 # now just check to see if those disks do actually exist
2170 # by looking for a device pointing to the first slice in
2171 # each case. limit the number to max_finddisksnum
2173 for disk in $unused_candidates; do
2174 if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2175 [ $count -lt $max_finddisksnum ]; then
2176 unused="$unused $disk"
2177 # do not impose limit if $@ is provided
2178 [[ -z $@ ]] && ((count = count + 1))
2182 # finally, return our disk list
2186 function add_user_freebsd #<group_name> <user_name> <basedir>
2192 # Check to see if the user exists.
2193 if id $user > /dev/null 2>&1; then
2197 # Assign 1000 as the base uid
2200 pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2203 # The uid is not unique
2207 if [[ $uid == 65000 ]]; then
2208 log_fail "No user id available under 65000 for $user"
2213 touch $basedir/$user/.hushlogin
2219 # Delete the specified user.
2223 function del_user_freebsd #<logname>
2227 if id $user > /dev/null 2>&1; then
2228 log_must pw userdel $user
2235 # Select valid gid and create specified group.
2239 function add_group_freebsd #<group_name>
2243 # See if the group already exists.
2244 if pw groupshow $group >/dev/null 2>&1; then
2248 # Assign 1000 as the base gid
2251 pw groupadd -g $gid -n $group > /dev/null 2>&1
2254 # The gid is not unique
2258 if [[ $gid == 65000 ]]; then
2259 log_fail "No group id available under 65000 for $group"
2265 # Delete the specified group.
2269 function del_group_freebsd #<group_name>
2273 pw groupdel -n $group > /dev/null 2>&1
2275 # Group does not exist, or was deleted successfully.
2277 # Name already exists as a group name
2278 9) log_must pw groupdel $group ;;
2285 function add_user_illumos #<group_name> <user_name> <basedir>
2291 log_must useradd -g $group -d $basedir/$user -m $user
2296 function del_user_illumos #<user_name>
2300 if id $user > /dev/null 2>&1; then
2301 log_must_retry "currently used" 6 userdel $user
2307 function add_group_illumos #<group_name>
2313 groupadd -g $gid $group > /dev/null 2>&1
2316 # The gid is not unique
2323 function del_group_illumos #<group_name>
2327 groupmod -n $grp $grp > /dev/null 2>&1
2329 # Group does not exist.
2331 # Name already exists as a group name
2332 9) log_must groupdel $grp ;;
2337 function add_user_linux #<group_name> <user_name> <basedir>
2343 log_must useradd -g $group -d $basedir/$user -m $user
2345 # Add new users to the same group and the command line utils.
2346 # This allows them to be run out of the original users home
2347 # directory as long as it permissioned to be group readable.
2348 cmd_group=$(stat --format="%G" $(command -v zfs))
2349 log_must usermod -a -G $cmd_group $user
2354 function del_user_linux #<user_name>
2358 if id $user > /dev/null 2>&1; then
2359 log_must_retry "currently used" 6 userdel $user
2363 function add_group_linux #<group_name>
2367 # Assign 100 as the base gid, a larger value is selected for
2368 # Linux because for many distributions 1000 and under are reserved.
2370 groupadd $group > /dev/null 2>&1
2378 function del_group_linux #<group_name>
2382 getent group $group > /dev/null 2>&1
2384 # Group does not exist.
2386 # Name already exists as a group name
2387 0) log_must groupdel $group ;;
2395 # Add specified user to specified group
2399 # $3 base of the homedir (optional)
2401 function add_user #<group_name> <user_name> <basedir>
2405 typeset basedir=${3:-"/var/tmp"}
2407 if ((${#group} == 0 || ${#user} == 0)); then
2408 log_fail "group name or user name are not defined."
2413 add_user_freebsd "$group" "$user" "$basedir"
2416 add_user_linux "$group" "$user" "$basedir"
2419 add_user_illumos "$group" "$user" "$basedir"
2427 # Delete the specified user.
2430 # $2 base of the homedir (optional)
2432 function del_user #<logname> <basedir>
2435 typeset basedir=${2:-"/var/tmp"}
2437 if ((${#user} == 0)); then
2438 log_fail "login name is necessary."
2443 del_user_freebsd "$user"
2446 del_user_linux "$user"
2449 del_user_illumos "$user"
2453 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
2459 # Select valid gid and create specified group.
2463 function add_group #<group_name>
2467 if ((${#group} == 0)); then
2468 log_fail "group name is necessary."
2473 add_group_freebsd "$group"
2476 add_group_linux "$group"
2479 add_group_illumos "$group"
2487 # Delete the specified group.
2491 function del_group #<group_name>
2495 if ((${#group} == 0)); then
2496 log_fail "group name is necessary."
2501 del_group_freebsd "$group"
2504 del_group_linux "$group"
2507 del_group_illumos "$group"
2515 # This function will return true if it's safe to destroy the pool passed
2516 # as argument 1. It checks for pools based on zvols and files, and also
2517 # files contained in a pool that may have a different mountpoint.
2519 function safe_to_destroy_pool { # $1 the pool name
2522 typeset DONT_DESTROY=""
2524 # We check that by deleting the $1 pool, we're not
2525 # going to pull the rug out from other pools. Do this
2526 # by looking at all other pools, ensuring that they
2527 # aren't built from files or zvols contained in this pool.
2529 for pool in $(zpool list -H -o name)
2533 # this is a list of the top-level directories in each of the
2534 # files that make up the path to the files the pool is based on
2535 FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')
2537 # this is a list of the zvols that make up the pool
2538 ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')
2540 # also want to determine if it's a file-based pool using an
2541 # alternate mountpoint...
2542 POOL_FILE_DIRS=$(zpool status -v $pool | \
2543 awk '/\// {print $1}' | \
2544 awk -F/ '!/dev/ {print $2}')
2546 for pooldir in $POOL_FILE_DIRS
2548 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2549 awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')
2551 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2555 if [ ! -z "$ZVOLPOOL" ]
2558 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2561 if [ ! -z "$FILEPOOL" ]
2564 log_note "Pool $pool is built from $FILEPOOL on $1"
2567 if [ ! -z "$ALTMOUNTPOOL" ]
2570 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2574 if [ -z "${DONT_DESTROY}" ]
2578 log_note "Warning: it is not safe to destroy $1!"
2584 # Verify that the zfs operation with the -p option works as expected
2585 # $1 operation, value could be create, clone or rename
2586 # $2 dataset type, value could be fs or vol
2588 # $4 new dataset name
2590 function verify_opt_p_ops
2595 typeset newdataset=$4
2597 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2598 log_fail "$datatype is not supported."
2601 # check parameters accordingly
2606 if [[ $datatype == "vol" ]]; then
2607 ops="create -V $VOLSIZE"
2611 if [[ -z $newdataset ]]; then
2612 log_fail "newdataset should not be empty" \
2615 log_must datasetexists $dataset
2616 log_must snapexists $dataset
2619 if [[ -z $newdataset ]]; then
2620 log_fail "newdataset should not be empty" \
2623 log_must datasetexists $dataset
2626 log_fail "$ops is not supported."
2630 # make sure the upper level filesystem does not exist
2631 destroy_dataset "${newdataset%/*}" "-rRf"
2633 # without -p option, operation will fail
2634 log_mustnot zfs $ops $dataset $newdataset
2635 log_mustnot datasetexists $newdataset ${newdataset%/*}
2637 # with -p option, operation should succeed
2638 log_must zfs $ops -p $dataset $newdataset
2641 if ! datasetexists $newdataset ; then
2642 log_fail "-p option does not work for $ops"
2645 # when $ops is create or clone, redoing the operation should still return zero
2646 if [[ $ops != "rename" ]]; then
2647 log_must zfs $ops -p $dataset $newdataset
2654 # Get configuration of pool
2663 if ! poolexists "$pool" ; then
2666 if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
2670 fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^'\''/, $2); sub(/'\''$/, $2); print $2}'
2674 # Private function. Randomly select one of the items from the arguments.
2679 function _random_get
2686 ((ind = RANDOM % cnt + 1))
2688 echo "$str" | cut -f $ind -d ' '
2692 # Randomly select one item from the arguments, including the NONE string
2694 function random_get_with_non
2699 _random_get "$cnt" "$@"
2703 # Randomly select one item from the arguments, excluding the NONE string
2707 _random_get "$#" "$@"
2711 # The function will generate a dataset name with a specific length
2712 # $1, the length of the name
2713 # $2, the base string to construct the name
2715 function gen_dataset_name
2718 typeset basestr="$2"
2719 typeset -i baselen=${#basestr}
2723 if ((len % baselen == 0)); then
2724 ((iter = len / baselen))
2726 ((iter = len / baselen + 1))
2728 while ((iter > 0)); do
2729 l_name="${l_name}$basestr"
2738 # Get cksum tuple of dataset
2741 # sample zdb output:
2742 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2743 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2744 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2745 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2746 function datasetcksum
2751 zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
2755 # Get the given disk/slice state from the specific field of the pool
2757 function get_device_state #pool disk field("", "spares","logs")
2760 typeset disk=${2#$DEV_DSKDIR/}
2761 typeset field=${3:-$pool}
2763 zpool status -v "$pool" 2>/dev/null | \
2764 awk -v device=$disk -v pool=$pool -v field=$field \
2765 'BEGIN {startconfig=0; startfield=0; }
2766 /config:/ {startconfig=1}
2767 (startconfig==1) && ($1==field) {startfield=1; next;}
2768 (startfield==1) && ($1==device) {print $2; exit;}
2770 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
2774 # get the root filesystem name if it's a zfsroot system.
2776 # return: root filesystem name
2782 rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
2783 elif ! is_linux; then
2784 rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
2787 if [[ -z "$rootfs" ]]; then
2788 log_fail "Can not get rootfs"
2790 if datasetexists $rootfs; then
2793 log_fail "This is not a zfsroot system."
2798 # get the rootfs's pool name
2802 function get_rootpool
2804 typeset rootfs=$(get_rootfs)
2809 # Verify that the required number of disks is given
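#
# Illustrative usage (requires at least three disks listed in $DISKS):
#	verify_disk_count "$DISKS" 3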
2811 function verify_disk_count
2813 typeset -i min=${2:-1}
2815 typeset -i count=$(echo "$1" | wc -w)
2817 if ((count < min)); then
2818 log_untested "A minimum of $min disks is required to run." \
2819 " You specified $count disk(s)"
2823 function ds_is_volume
2825 typeset type=$(get_prop type $1)
2826 [ $type = "volume" ]
2829 function ds_is_filesystem
2831 typeset type=$(get_prop type $1)
2832 [ $type = "filesystem" ]
2836 # Check if Trusted Extensions are installed and enabled
2838 function is_te_enabled
2840 svcs -H -o state labeld 2>/dev/null | grep -q "enabled"
2843 # Return the number of CPUs (cross-platform)
2844 function get_num_cpus
2847 grep -c '^processor' /proc/cpuinfo
2848 elif is_freebsd; then
2849 sysctl -n kern.smp.cpus
2855 # Utility function to determine if a system has multiple cpus.
2858 [[ $(get_num_cpus) -gt 1 ]]
2861 function get_cpu_freq
2864 lscpu | awk '/CPU MHz/ { print $3 }'
2865 elif is_freebsd; then
2866 sysctl -n hw.clockrate
2868 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2872 # Run the given command as the user provided.
2878 log_note "user: $user"
2881 typeset out=$TEST_BASE_DIR/out
2882 typeset err=$TEST_BASE_DIR/err
2884 sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
2886 log_note "out: $(<$out)"
2887 log_note "err: $(<$err)"
2892 # Check if the pool contains the specified vdevs
2897 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2898 # vdevs is not in the pool, and 2 if pool name is missing.
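#
# Illustrative usage ($DISK1 and $DISK2 are arbitrary example variables):
#	vdevs_in_pool "$TESTPOOL" "$DISK1" "$DISK2" || log_fail "vdev missing"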
2900 function vdevs_in_pool
2905 if [[ -z $pool ]]; then
2906 log_note "Missing pool name."
2912 # We could use 'zpool list' to only get the vdevs of the pool but we
2913 # can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
2914 # therefore we use the 'zpool status' output.
2915 typeset tmpfile=$(mktemp)
2916 zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
2917 for vdev in "$@"; do
2918 grep -wq ${vdev##*/} $tmpfile || return 1
2931 max=$((max > i ? max : i))
2937 # Write data that can be compressed into a directory
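#
# Illustrative usage (a sketch; the sizes and counts are arbitrary examples,
# and the exact size format accepted for $2 depends on to_bytes()/fio):
#	write_compressible $TESTDIR 10m 4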
2938 function write_compressible
2942 typeset nfiles=${3:-1}
2943 typeset bs=${4:-1024k}
2944 typeset fname=${5:-file}
2946 [[ -d $dir ]] || log_fail "No directory: $dir"
2948 # Under Linux fio is not currently used since its behavior can
2949 # differ significantly across versions. This includes missing
2950 # command line options and cases where the --buffer_compress_*
2951 # options fail to behave as expected.
2953 typeset file_bytes=$(to_bytes $megs)
2954 typeset bs_bytes=4096
2955 typeset blocks=$(($file_bytes / $bs_bytes))
2957 for (( i = 0; i < $nfiles; i++ )); do
2958 truncate -s $file_bytes $dir/$fname.$i
2960 # Write every third block to get 66% compression.
2961 for (( j = 0; j < $blocks; j += 3 )); do
2962 dd if=/dev/urandom of=$dir/$fname.$i \
2963 seek=$j bs=$bs_bytes count=1 \
2964 conv=notrunc >/dev/null 2>&1
2968 command -v fio > /dev/null || log_unsupported "fio missing"
2974 --buffer_compress_percentage=66 \
2975 --buffer_compress_chunk=4096 \
2976 --directory="$dir" \
2977 --numjobs="$nfiles" \
2978 --nrfiles="$nfiles" \
2981 --filesize="$megs" \
2982 "--filename_format='$fname.\$jobnum' >/dev/null"
2991 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
2993 objnum=$(stat -f "%i" $pathname)
2995 objnum=$(stat -c %i $pathname)
3001 # Sync data to the pool
3004 # $2 boolean to force uberblock (and config including zpool cache file) update
3006 function sync_pool #pool <force>
3008 typeset pool=${1:-$TESTPOOL}
3009 typeset force=${2:-false}
3011 if [[ $force == true ]]; then
3012 log_must zpool sync -f $pool
3014 log_must zpool sync $pool
3023 # $1 boolean to force uberblock (and config including zpool cache file) update
3025 function sync_all_pools #<force>
3027 typeset force=${1:-false}
3029 if [[ $force == true ]]; then
3030 log_must zpool sync -f
3039 # Wait for the zpool 'freeing' property to drop to zero.
3043 function wait_freeing #pool
3045 typeset pool=${1:-$TESTPOOL}
3047 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3053 # Wait for every device replace operation to complete
3058 function wait_replacing #pool timeout
3060 typeset timeout=${2:-300}
3061 typeset pool=${1:-$TESTPOOL}
3062 for (( timer = 0; timer < $timeout; timer++ )); do
3063 is_pool_replacing $pool || break;
# Wait for a pool to be scrubbed
function wait_scrubbed #pool timeout
typeset timeout=${2:-300}
typeset pool=${1:-$TESTPOOL}
for (( timer = 0; timer < $timeout; timer++ )); do
is_pool_scrubbed $pool && break;
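#
# Example (illustrative only): start a scrub and wait for it to finish before
# inspecting the pool status:
#
#	log_must zpool scrub $TESTPOOL
#	wait_scrubbed $TESTPOOL
#	log_must zpool status -v $TESTPOOL
#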
# Back up the zed.rc in our test directory so that we can edit it for our test.
# Returns: backup file name. You will need to pass this to zed_rc_restore().
function zed_rc_backup
zedrc_backup="$(mktemp)"
cp $ZEDLET_DIR/zed.rc $zedrc_backup
function zed_rc_restore
mv $1 $ZEDLET_DIR/zed.rc
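#
# Example (illustrative only): tweak a zed.rc setting for one test, then put
# the original file back during cleanup:
#
#	zedrc_bak=$(zed_rc_backup)
#	... modify $ZEDLET_DIR/zed.rc and run the test ...
#	zed_rc_restore $zedrc_bak
#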
# Set up a custom environment for the ZED.
# $@ Optional list of zedlets to run under zed.
log_unsupported "No zed on $UNAME"
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
if [[ ! -e $VDEVID_CONF ]]; then
log_must touch $VDEVID_CONF
if [[ -e $VDEVID_CONF_ETC ]]; then
log_fail "Must not have $VDEVID_CONF_ETC file present on system"
# Create a symlink for the /etc/zfs/vdev_id.conf file.
log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
# Set up a minimal ZED configuration. Individual test cases should
# add additional ZEDLETs as needed for their specific test.
log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
# Scripts must only be user writable.
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
saved_umask=$(umask)
for i in $EXTRA_ZEDLETS ; do
log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
log_must umask $saved_umask
# Customize the zed.rc file to enable the full debug log.
log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
# Clean up the custom ZED environment.
# $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
for extra_zedlet; do
log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \
$ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
# Check if ZED is currently running; if so, return its PIDs.
zedpids="$(pgrep -x zed)"
zedpids2="$(pgrep -x lt-zed)"
echo ${zedpids} ${zedpids2}
# Check if ZED is currently running; if not, start it.
# ZEDLET_DIR=/var/tmp/zed
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
# Verify the ZED is not already running.
zedpids=$(zed_check)
if [ -n "$zedpids" ]; then
# We never want to just keep going if zed is already running -
# usually this means our test cases will break in strange ways,
# because the zed we meant to configure won't be picking up our
# changes in the temporary zedlet directory.
log_fail "ZED already running - ${zedpids}"
log_note "Starting ZED"
# Run ZED in the background and redirect foreground logging
# output to $ZED_LOG.
log_must truncate -s 0 $ZED_DEBUG_LOG
log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
"-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
log_note "Stopping ZED"
zedpids=$(zed_check)
[ -z "$zedpids" ] && break
log_must kill $zedpids
function zed_events_drain
while [ $(zpool events -H | wc -l) -ne 0 ]; do
zpool events -c >/dev/null
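#
# Example (illustrative only): drain any stale events before provoking a new
# one so the test only sees events it generated itself:
#
#	zed_events_drain
#	log_must zpool scrub $TESTPOOL
#	log_must zpool events -v $TESTPOOL
#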
# Set a variable in zed.rc to a given value, un-commenting it in the process.
eval sed -i $cmd $ZEDLET_DIR/zed.rc
echo "$var=$val" >> $ZEDLET_DIR/zed.rc
# Check if the provided device is currently in use as a swap device.
function is_swap_inuse
if [[ -z $device ]] ; then
log_note "No device specified."
swapon -s | grep -wq $(readlink -f $device)
swapctl -l | grep -wq $device
swap -l | grep -wq $device
# Set up swap using the provided device.
log_must eval "mkswap $swapdev > /dev/null 2>&1"
log_must swapon $swapdev
log_must swapctl -a $swapdev
log_must swap -a $swapdev
# Clean up swap on the provided device.
function swap_cleanup
if is_swap_inuse $swapdev; then
log_must swapoff $swapdev
elif is_freebsd; then
log_must swapoff $swapdev
log_must swap -d $swapdev
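#
# Example (illustrative only; the swap_setup counterpart name and the zvol
# device path are assumptions):
#
#	swap_setup /dev/zvol/$TESTPOOL/swapvol
#	... run the test ...
#	swap_cleanup /dev/zvol/$TESTPOOL/swapvol
#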
# Set a global system tunable (64-bit value)
# $1 tunable name (use a NAME defined in tunables.cfg)
function set_tunable64
set_tunable_impl "$1" "$2" Z
# Set a global system tunable (32-bit value)
# $1 tunable name (use a NAME defined in tunables.cfg)
function set_tunable32
set_tunable_impl "$1" "$2" W
function set_tunable_impl
typeset mdb_cmd="$3"
eval "typeset tunable=\$$name"
log_unsupported "Tunable '$name' is unsupported on $UNAME"
log_fail "Tunable '$name' must be added to tunables.cfg"
[[ -z "$value" ]] && return 1
[[ -z "$mdb_cmd" ]] && return 1
typeset zfs_tunables="/sys/module/zfs/parameters"
echo "$value" >"$zfs_tunables/$tunable"
sysctl vfs.zfs.$tunable=$value
echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
function save_tunable
[[ ! -d $TEST_BASE_DIR ]] && return 1
[[ -e $TEST_BASE_DIR/tunable-$1 ]] && return 2
echo "$(get_tunable "$1")" > "$TEST_BASE_DIR/tunable-$1"
function restore_tunable
[[ ! -e $TEST_BASE_DIR/tunable-$1 ]] && return 1
val="$(cat "$TEST_BASE_DIR/tunable-$1")"
set_tunable64 "$1" "$val"
rm $TEST_BASE_DIR/tunable-$1
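#
# Example (illustrative only; PREFETCH_DISABLE is just a stand-in for any NAME
# defined in tunables.cfg):
#
#	save_tunable PREFETCH_DISABLE
#	log_must set_tunable64 PREFETCH_DISABLE 1
#	... run the test ...
#	restore_tunable PREFETCH_DISABLE
#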
# Get a global system tunable
# $1 tunable name (use a NAME defined in tunables.cfg)
function get_tunable
get_tunable_impl "$1"
function get_tunable_impl
typeset module="${2:-zfs}"
typeset check_only="$3"
eval "typeset tunable=\$$name"
if [ -z "$check_only" ] ; then
log_unsupported "Tunable '$name' is unsupported on $UNAME"
if [ -z "$check_only" ] ; then
log_fail "Tunable '$name' must be added to tunables.cfg"
typeset zfs_tunables="/sys/module/$module/parameters"
cat $zfs_tunables/$tunable
sysctl -n vfs.zfs.$tunable
[[ "$module" == "zfs" ]] || return 1
# Does a tunable exist?
function tunable_exists
get_tunable_impl $1 "zfs" 1
# Compute the xxh128sum of the given file, or of stdin if no file is given.
# Note: the file path must not contain spaces.
function xxh128digest
xxh128sum $1 | awk '{print $1}'
# Compare the xxhash128 digests of two files.
function cmp_xxh128 {
typeset sum1=$(xxh128digest $file1)
typeset sum2=$(xxh128digest $file2)
test "$sum1" = "$sum2"
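#
# Example (illustrative only; file names are arbitrary):
#
#	log_must cp $TESTDIR/src $TESTDIR/dst
#	cmp_xxh128 $TESTDIR/src $TESTDIR/dst || log_fail "copies differ"
#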
function new_fs #<args>
echo y | newfs -v "$@"
function stat_size #<path>
function stat_mtime #<path>
function stat_ctime #<path>
function stat_crtime #<path>
function stat_generation #<path>
getversion "${path}"
stat -f %v "${path}"
# Run a command as if it were being run in a TTY.
script -q /dev/null env "$@"
script --return --quiet -c "$*" /dev/null
# Produce a random permutation of the integers in a given range (inclusive).
function range_shuffle # begin end
seq ${begin} ${end} | sort -R
# Cross-platform xattr helpers
function get_xattr # name path
getextattr -qq user "${name}" "${path}"
attr -qg "${name}" "${path}"
function set_xattr # name value path
setextattr user "${name}" "${value}" "${path}"
attr -qs "${name}" -V "${value}" "${path}"
function set_xattr_stdin # name value
setextattr -i user "${name}" "${path}"
attr -qs "${name}" "${path}"
function rm_xattr # name path
rmextattr -q user "${name}" "${path}"
attr -qr "${name}" "${path}"
function ls_xattr # path
lsextattr -qq user "${path}"
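#
# Example (illustrative only; the attribute name and file are arbitrary):
#
#	log_must set_xattr testattr testvalue $TESTDIR/file
#	[ "$(get_xattr testattr $TESTDIR/file)" = "testvalue" ] || \
#	    log_fail "unexpected xattr value"
#	log_must rm_xattr testattr $TESTDIR/file
#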
function kstat # stat flags?
typeset flags=${2-"-n"}
sysctl $flags kstat.zfs.misc.$stat
cat "/proc/spl/kstat/zfs/$stat" 2>/dev/null
function get_arcstat # stat
kstat arcstats.$stat
kstat arcstats | awk "/$stat/"' { print $3 }'
function punch_hole # offset length file
truncate -d -o $offset -l $length "$file"
fallocate --punch-hole --offset $offset --length $length "$file"
function zero_range # offset length file
fallocate --zero-range --offset $offset --length $length "$file"
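#
# Example (illustrative only): punch a 1M hole at offset 4M in a test file,
# then sync and look at the on-disk usage:
#
#	log_must punch_hole $((4 * 1024 * 1024)) $((1024 * 1024)) $TESTDIR/file
#	sync_pool $TESTPOOL
#	du -h $TESTDIR/file
#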
# Wait for the specified arcstat to reach non-zero quiescence.
# If echo is 1, echo the value after reaching quiescence; if echo is 0,
# just note which arcstat we are waiting on.
function arcstat_quiescence # stat echo
typeset do_once=true
if [[ $echo -eq 0 ]]; then
echo "Waiting for arcstat $1 quiescence."
while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
typeset stat1=$(get_arcstat $stat)
typeset stat2=$(get_arcstat $stat)
if [[ $echo -eq 1 ]]; then
function arcstat_quiescence_noecho # stat
arcstat_quiescence $stat 0
function arcstat_quiescence_echo # stat
arcstat_quiescence $stat 1
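#
# Example (illustrative only; dnode_size is just a stand-in for any arcstat):
#
#	log_must zfs mount $TESTPOOL/$TESTFS
#	arcstat_quiescence_noecho dnode_size
#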
# Given an array of pids, wait until all processes
# have completed and check their return status.
function wait_for_children #children
for child in "${children[@]}"
wait ${child} || child_exit=$?
if [ $child_exit -ne 0 ]; then
echo "child ${child} failed with ${child_exit}"
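#
# Example (illustrative only): run two background writers and fail the test
# if either of them exits non-zero:
#
#	dd if=/dev/urandom of=$TESTDIR/a bs=1M count=64 &
#	pids="$!"
#	dd if=/dev/urandom of=$TESTDIR/b bs=1M count=64 &
#	pids="$pids $!"
#	log_must wait_for_children $pids
#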
# Compare two directory trees recursively in a manner similar to diff(1), but
# using rsync. If there are any discrepancies, a summary of the differences is
# output and a non-zero status is returned.
# If you're comparing a directory after a ZIL replay, you should set
# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff, which will cause
# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
# information). See the usage sketch after replay_directory_diff below.
function directory_diff # dir_a dir_b
zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
# If one of the directories doesn't exist, return 2. This is to match the
# semantics of diff.
if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
# Run rsync with --dry-run --itemize-changes to get something akin to diff
# output, but rsync is far more thorough in detecting differences (diff
# doesn't compare file metadata, and cannot handle special files).
# Also make sure to filter out non-user.* xattrs when comparing. On
# SELinux-enabled systems the copied tree will probably have different
# SELinux labels.
args=("-nicaAHX" '--filter=-x! user.*' "--delete")
# NOTE: Quite a few rsync builds do not support --crtimes, which would be
# necessary to verify that creation times are being maintained properly.
# Unfortunately, because of this we cannot use it unconditionally, but we
# can check whether this rsync build supports it and use it when it does.
# This check is based on the same check in the rsync test suite
# (testsuite/crtimes.test).
# We check crtimes even with zil_replay=1 because the ZIL does store
# creation times and we should make sure they match (if the creation times
# do not match there is a "c" entry in one of the columns).
if rsync --version | grep -q "[, ] crtimes"; then
log_note "This rsync package does not support --crtimes (-N)."
# If we are testing a ZIL replay, we need to ignore timestamp changes.
# Unfortunately --no-times doesn't do what we want -- it will still tell
# you if the timestamps don't match, but rsync will set the timestamps to
# the current time (leading to an itemised change entry). It's simpler to
# just filter out those lines.
if [ "$zil_replay" -eq 0 ]; then
# Different rsync versions have different numbers of columns. So just
# require that, aside from the first two, all other columns be blank
# (literal ".") or a timestamp field ("[tT]").
filter=("grep" "-v" '^\..[.Tt]\+ ')
diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
if [ -n "$diff" ]; then
# Compare two directory trees recursively, without checking whether the mtimes
# match (creation times will be checked if the available rsync binary supports
# it). This is necessary for ZIL replay checks (because the ZIL does not
# contain mtimes and thus after a ZIL replay, mtimes won't match).
# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
function replay_directory_diff # dir_a dir_b
LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
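#
# Example (illustrative only; the paths are assumptions about a typical test):
#
#	replay_directory_diff $TESTDIR/before $TESTDIR/after || \
#	    log_fail "directory trees differ after ZIL replay"
#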
# Put coredumps into $1/core.{basename}
# Output must be saved and passed to pop_coredump_pattern on cleanup.
function push_coredump_pattern # dir
cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
echo "$1/core.%e" >/proc/sys/kernel/core_pattern &&
echo 0 >/proc/sys/kernel/core_uses_pid
sysctl -n kern.corefile
sysctl kern.corefile="$1/core.%N" >/dev/null
# Nothing to output - set only for this shell
coreadm -p "$1/core.%f"
# Put coredumps back into the default location
function pop_coredump_pattern
[ -s "$1" ] || return 0
{ read -r pat; read -r pid; } < "$1"
echo "$pat" >/proc/sys/kernel/core_pattern &&
echo "$pid" >/proc/sys/kernel/core_uses_pid
sysctl kern.corefile="$(<"$1")" >/dev/null
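#
# Example (illustrative only): redirect coredumps for the duration of a test
# and restore the previous pattern during cleanup:
#
#	coresave=$(mktemp)
#	push_coredump_pattern $TEST_BASE_DIR >$coresave
#	... run the crash-prone part of the test ...
#	pop_coredump_pattern $coresave
#	rm -f $coresave
#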