# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
#
. ${STF_TOOLS}/contrib/include/logapi.shlib
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
function ismounted
{
        typeset fstype=$2
        [[ -z $fstype ]] && fstype=zfs
        typeset out dir name ret

        case $fstype in
        zfs)
                if [[ "$1" == "/"* ]] ; then
                        for out in $(zfs mount | awk '{print $2}'); do
                                [[ $1 == $out ]] && return 0
                        done
                else
                        for out in $(zfs mount | awk '{print $1}'); do
                                [[ $1 == $out ]] && return 0
                        done
                fi
                ;;
        ufs|nfs)
                out=$(df -F $fstype $1 2>/dev/null)
                ret=$?
                (($ret != 0)) && return $ret

                # Parse the mount point and device name out of the df output
                dir=${out%%\(*}
                dir=${dir%% *}
                name=${out##*\(}
                name=${name%%\)*}
                name=${name%% *}

                [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
                ;;
        esac

        return 1
}
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name or path
# $2 filesystem type; optional - defaulted to zfs

function mounted
{
        ismounted $1 $2
        (($? == 0)) && return 0
        return 1
}
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name or path
# $2 filesystem type; optional - defaulted to zfs

function unmounted
{
        ismounted $1 $2
        (($? == 1)) && return 0
        return 1
}
# Split a line on ","
#
# $1 - line to split

function splitline
{
        echo $1 | sed "s/,/ /g"
}
function default_setup
{
        default_setup_noexit "$@"

        log_pass
}
#
# Given a list of disks, setup storage pools and datasets.
#
function default_setup_noexit
{
        typeset disklist=$1
        typeset container=$2
        typeset volume=$3
        if is_global_zone; then
                if poolexists $TESTPOOL ; then
                        destroy_pool $TESTPOOL
                fi
                [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
                log_must zpool create -f $TESTPOOL $disklist
        fi

        rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
        mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

        log_must zfs create $TESTPOOL/$TESTFS
        log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
        if [[ -n $container ]]; then
                rm -rf $TESTDIR1 || \
                    log_unresolved Could not remove $TESTDIR1
                mkdir -p $TESTDIR1 || \
                    log_unresolved Could not create $TESTDIR1

                log_must zfs create $TESTPOOL/$TESTCTR
                log_must zfs set canmount=off $TESTPOOL/$TESTCTR
                log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
                log_must zfs set mountpoint=$TESTDIR1 \
                    $TESTPOOL/$TESTCTR/$TESTFS1
        fi
        if [[ -n $volume ]]; then
                if is_global_zone ; then
                        log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
                else
                        log_must zfs create $TESTPOOL/$TESTVOL
                fi
        fi
}
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
        typeset disklist=$1

        default_setup "$disklist" "true"
}
#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
        typeset disklist=$1

        default_setup "$disklist" "" "true"
}
#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
        typeset disklist=$1

        default_setup "$disklist" "true" "true"
}
#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
        typeset fs_vol=${1:-$TESTFS}
        typeset snap=${2:-$TESTSNAP}

        [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
        [[ -z $snap ]] && log_fail "Snapshot's name is undefined."

        if snapexists $fs_vol@$snap; then
                log_fail "$fs_vol@$snap already exists."
        fi
        datasetexists $fs_vol || \
            log_fail "$fs_vol must exist."

        log_must zfs snapshot $fs_vol@$snap
}
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
        typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
        typeset clone=${2:-$TESTPOOL/$TESTCLONE}

        [[ -z $snap ]] && \
            log_fail "Snapshot name is undefined."
        [[ -z $clone ]] && \
            log_fail "Clone name is undefined."

        log_must zfs clone $snap $clone
}
#
# Create a bookmark of the given snapshot. By default, create a bookmark on
# the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
        typeset fs_vol=${1:-$TESTFS}
        typeset snap=${2:-$TESTSNAP}
        typeset bkmark=${3:-$TESTBKMARK}

        [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
        [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
        [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

        if bkmarkexists $fs_vol#$bkmark; then
                log_fail "$fs_vol#$bkmark already exists."
        fi
        datasetexists $fs_vol || \
            log_fail "$fs_vol must exist."
        snapexists $fs_vol@$snap || \
            log_fail "$fs_vol@$snap must exist."

        log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
function default_mirror_setup
{
        default_mirror_setup_noexit $1 $2 $3

        log_pass
}
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#   $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
        readonly func="default_mirror_setup_noexit"
        typeset primary=$1
        typeset secondary=$2

        [[ -z $primary ]] && \
            log_fail "$func: No parameters passed"
        [[ -z $secondary ]] && \
            log_fail "$func: No secondary partition passed"
        [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
        log_must zpool create -f $TESTPOOL mirror $@
        log_must zfs create $TESTPOOL/$TESTFS
        log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Create a number of mirrors.
# We create a number ($1) of 2-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted.
# @parameters: $1 the number of mirrors to create
#   $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
#
function setup_mirrors
{
        typeset -i nmirrors=$1

        shift
        while ((nmirrors > 0)); do
                log_must test -n "$1" -a -n "$2"
                [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
                log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
                shift 2
                ((nmirrors = nmirrors - 1))
        done
}
#
# Create a number of raidz pools.
# We create a number ($1) of raidz pools using the pairs of disks named
# on the command line. These pools are *not* mounted.
# @parameters: $1 the number of pools to create
#   $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
#
function setup_raidzs
{
        typeset -i nraidzs=$1

        shift
        while ((nraidzs > 0)); do
                log_must test -n "$1" -a -n "$2"
                [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
                log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
                shift 2
                ((nraidzs = nraidzs - 1))
        done
}
#
# Destroy the configured testpool mirrors.
# The mirrors are of the form ${TESTPOOL}{number}.
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
        default_cleanup_noexit

        log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the
# raid-z
# $1 the list of disks
#
function default_raidz_setup
{
        typeset disklist="$*"
        disks=(${disklist[*]})

        if [[ ${#disks[*]} -lt 2 ]]; then
                log_fail "A raid-z requires a minimum of two disks."
        fi

        [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
        log_must zpool create -f $TESTPOOL raidz $1 $2 $3
        log_must zfs create $TESTPOOL/$TESTFS
        log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

        log_pass
}
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
#
function default_cleanup
{
        default_cleanup_noexit

        log_pass
}
function default_cleanup_noexit
{
        typeset exclude=""
        typeset pool=""
        #
        # Destroying the pool will also destroy any
        # filesystems it contains.
        #
        if is_global_zone; then
                zfs unmount -a > /dev/null 2>&1
                exclude=`eval echo \"'(${KEEP})'\"`
                ALL_POOLS=$(zpool list -H -o name \
                    | grep -v "$NO_POOLS" | egrep -v "$exclude")
                # Here, we loop through the pools we're allowed to
                # destroy, only destroying them if it's safe to do
                # so.
                while [ ! -z ${ALL_POOLS} ]
                do
                        for pool in ${ALL_POOLS}
                        do
                                if safe_to_destroy_pool $pool ; then
                                        destroy_pool $pool
                                fi
                                ALL_POOLS=$(zpool list -H -o name \
                                    | grep -v "$NO_POOLS" \
                                    | egrep -v "$exclude")
                        done
                done

                zfs mount -a
        else
                typeset fs=""
                for fs in $(zfs list -H -o name \
                    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
                        datasetexists $fs && \
                            log_must zfs destroy -Rf $fs
                done

                # Clean up here to avoid leaving garbage directories behind.
                for fs in $(zfs list -H -o name); do
                        [[ $fs == /$ZONE_POOL ]] && continue
                        [[ -d $fs ]] && log_must rm -rf $fs/*
                done
                #
                # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems
                # properties to their default values
                #
                for fs in $(zfs list -H -o name); do
                        if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
                                log_must zfs set reservation=none $fs
                                log_must zfs set recordsize=128K $fs
                                log_must zfs set mountpoint=/$fs $fs
                                typeset enc=""
                                enc=$(get_prop encryption $fs)
                                if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
                                    [[ "$enc" == "off" ]]; then
                                        log_must zfs set checksum=on $fs
                                fi
                                log_must zfs set compression=off $fs
                                log_must zfs set atime=on $fs
                                log_must zfs set devices=off $fs
                                log_must zfs set exec=on $fs
                                log_must zfs set setuid=on $fs
                                log_must zfs set readonly=off $fs
                                log_must zfs set snapdir=hidden $fs
                                log_must zfs set aclmode=groupmask $fs
                                log_must zfs set aclinherit=secure $fs
                        fi
                done
        fi
        [[ -d $TESTDIR ]] && \
            log_must rm -rf $TESTDIR
}
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
        if ! is_global_zone; then
                reexport_pool
        fi

        ismounted $TESTPOOL/$TESTCTR/$TESTFS1
        [[ $? -eq 0 ]] && \
            log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

        datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
            log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

        datasetexists $TESTPOOL/$TESTCTR && \
            log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

        [[ -e $TESTDIR1 ]] && \
            log_must rm -rf $TESTDIR1 > /dev/null 2>&1

        default_cleanup
}
#
# Common function used to clean up a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
        typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

        if ! snapexists $snap; then
                log_fail "'$snap' does not exist."
        fi

        #
        # The mountpoint reported by 'get_prop' is not the real mountpoint
        # when the snapshot is unmounted, so first check that the snapshot
        # is actually mounted on the current system.
        #
        typeset mtpt=""
        if ismounted $snap; then
                mtpt=$(get_prop mountpoint $snap)
                (($? != 0)) && \
                    log_fail "get_prop mountpoint $snap failed."
        fi

        log_must zfs destroy $snap
        [[ $mtpt != "" && -d $mtpt ]] && \
            log_must rm -rf $mtpt
}
#
# Common function used to clean up a clone.
#
# $1 clone name
#
function destroy_clone
{
        typeset clone=${1:-$TESTPOOL/$TESTCLONE}

        if ! datasetexists $clone; then
                log_fail "'$clone' does not exist."
        fi

        # The mountpoint needs the same handling as in destroy_snapshot.
        typeset mtpt=""
        if ismounted $clone; then
                mtpt=$(get_prop mountpoint $clone)
                (($? != 0)) && \
                    log_fail "get_prop mountpoint $clone failed."
        fi

        log_must zfs destroy $clone
        [[ $mtpt != "" && -d $mtpt ]] && \
            log_must rm -rf $mtpt
}
#
# Common function used to clean up a bookmark of a file system or volume.
# Defaults to deleting the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
        typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

        if ! bkmarkexists $bkmark; then
                log_fail "'$bkmark' does not exist."
        fi

        log_must zfs destroy $bkmark
}
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name

function snapexists
{
        zfs list -H -t snapshot "$1" > /dev/null 2>&1
        return $?
}
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
        zfs list -H -t bookmark "$1" > /dev/null 2>&1
        return $?
}
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @parameters:
#       $1 dataset whose property is being set
#       $2 property to set
#       $3 value to set property to
# @returns:
#       0 if the property could be set.
#       non-zero otherwise.
# @uses: ZFS
#
function dataset_setprop
{
        typeset fn=dataset_setprop

        if (($# < 3)); then
                log_note "$fn: Insufficient parameters (need 3, had $#)"
                return 1
        fi
        typeset output=
        output=$(zfs set $2=$3 $1 2>&1)
        typeset rv=$?
        if ((rv != 0)); then
                log_note "Setting property on $1 failed."
                log_note "property $2=$3"
                log_note "Return Code: $rv"
                log_note "Output: $output"
                return $rv
        fi
        return 0
}
#
# Assign suite-defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#       0 if the dataset has been altered.
#       1 if no pool name was passed in.
#       2 if the dataset could not be found.
#       3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
        typeset dataset="$1"

        [[ -z $dataset ]] && return 1

        typeset confset=
        typeset -i found=0
        for confset in $(zfs list); do
                if [[ $dataset = $confset ]]; then
                        found=1
                        break
                fi
        done
        [[ $found -eq 0 ]] && return 2
        if [[ -n $COMPRESSION_PROP ]]; then
                dataset_setprop $dataset compression $COMPRESSION_PROP || \
                    return 3
                log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
        fi
        if [[ -n $CHECKSUM_PROP ]]; then
                dataset_setprop $dataset checksum $CHECKSUM_PROP || \
                    return 3
                log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
        fi
        return 0
}
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
        (($@)) || log_fail "$@"
}
#
# Function to format partition size of a disk
# Given a disk cxtxdx, reduces all partition sizes to 0
#
function zero_partitions #<whole_disk_name>
{
        typeset diskname=$1
        typeset i

        for i in 0 1 3 4 5 6 7
        do
                set_partition $i "" 0mb $diskname
        done
}
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
        typeset -i slicenum=$1
        typeset start=$2
        typeset size=$3
        typeset disk=$4
        [[ -z $slicenum || -z $size || -z $disk ]] && \
            log_fail "The slice, size or disk name is unspecified."
        typeset format_file=/var/tmp/format_in.$$

        echo "partition" >$format_file
        echo "$slicenum" >> $format_file
        echo "" >> $format_file
        echo "" >> $format_file
        echo "$start" >> $format_file
        echo "$size" >> $format_file
        echo "label" >> $format_file
        echo "" >> $format_file
        echo "q" >> $format_file
        echo "q" >> $format_file

        format -e -s -d $disk -f $format_file
        typeset ret_val=$?
        rm -f $format_file
        [[ $ret_val -ne 0 ]] && \
            log_fail "Unable to format $disk slice $slicenum to $size"
        return 0
}
#
# Get the end cylinder of the given slice
#
function get_endslice #<disk> <slice>
{
        typeset disk=$1
        typeset slice=$2
        if [[ -z $disk || -z $slice ]] ; then
                log_fail "The disk name or slice number is unspecified."
        fi

        disk=${disk#/dev/dsk/}
        disk=${disk#/dev/rdsk/}
        disk=${disk%s*}

        typeset -i ratio=0
        ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
            grep "sectors\/cylinder" | \
            awk '{print $2}')

        if ((ratio == 0)); then
                return
        fi

        typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
            nawk -v token="$slice" '{if ($1==token) print $6}')

        ((endcyl = (endcyl + 1) / ratio))
        echo $endcyl
}
#
# Given a size, disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
        typeset -i i=0
        typeset slice_size=$1
        typeset disk_name=$2
        typeset total_slices=$3
        typeset cyl

        zero_partitions $disk_name
        while ((i < $total_slices)); do
                if ((i == 2)); then
                        ((i = i + 1))
                        continue
                fi
                set_partition $i "$cyl" $slice_size $disk_name
                cyl=$(get_endslice $disk_name $i)
                ((i = i+1))
        done
}
#
# This function continues to write files into dirnum directories, filenum
# files per directory, until either file_write returns an error or the
# maximum number of files per directory has been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#               non 0 on error
#
# Where :
#       destdir:    is the directory where everything is to be created under
#       dirnum:     the maximum number of subdirectories to use, -1 no limit
#       filenum:    the maximum number of files per subdirectory
#       bytes:      number of bytes to write
#       num_writes: number of times to write out bytes
#       data:       the data that will be written
#
#       E.g.
#       fill_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
        typeset destdir=${1:-$TESTDIR}
        typeset -i dirnum=${2:-50}
        typeset -i filenum=${3:-50}
        typeset -i bytes=${4:-8192}
        typeset -i num_writes=${5:-10240}
        typeset -i data=${6:-0}

        typeset -i odirnum=1
        typeset -i idirnum=0
        typeset -i fn=0
        typeset -i retval=0

        log_must mkdir -p $destdir/$idirnum
        while (($odirnum > 0)); do
                if ((dirnum >= 0 && idirnum >= dirnum)); then
                        odirnum=0
                        break
                fi
                file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
                    -b $bytes -c $num_writes -d $data
                retval=$?
                if (($retval != 0)); then
                        odirnum=0
                fi
                if (($fn >= $filenum)); then
                        fn=0
                        ((idirnum = idirnum + 1))
                        log_must mkdir -p $destdir/$idirnum
                else
                        ((fn = fn + 1))
                fi
        done
        return $retval
}
#
# Simple function to get the specified property. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
        typeset prop_val
        typeset prop=$1
        typeset dataset=$2

        prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
        if [[ $? -ne 0 ]]; then
                log_note "Unable to get $prop property for dataset " \
                    "$dataset"
                return 1
        fi

        echo "$prop_val"
        return 0
}
#
# Simple function to get the specified property of a pool. If unable to
# get the property then exits.
#
function get_pool_prop # property pool
{
        typeset prop_val
        typeset prop=$1
        typeset pool=$2

        if poolexists $pool ; then
                prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
                    awk '{print $3}')
                if [[ $? -ne 0 ]]; then
                        log_note "Unable to get $prop property for pool " \
                            "$pool"
                        return 1
                fi
        else
                log_note "Pool $pool does not exist."
                return 1
        fi

        echo "$prop_val"
        return 0
}
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name

function poolexists
{
        typeset pool=$1

        if [[ -z $pool ]]; then
                log_note "No pool name given."
                return 1
        fi

        zpool get name "$pool" > /dev/null 2>&1
        return $?
}
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
function datasetexists
{
        if (($# == 0)); then
                log_note "No dataset name given."
                return 1
        fi

        while (($# > 0)); do
                zfs get name $1 > /dev/null 2>&1 || \
                    return $?
                shift
        done

        return 0
}
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
function datasetnonexists
{
        if (($# == 0)); then
                log_note "No dataset name given."
                return 1
        fi

        while (($# > 0)); do
                zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
                    && return 1
                shift
        done

        return 0
}
#
# Given a mountpoint, or a dataset name, determine if it is shared.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
        typeset fs=$1
        typeset mtpt

        if [[ $fs != "/"* ]] ; then
                if datasetnonexists "$fs" ; then
                        return 1
                else
                        mtpt=$(get_prop mountpoint "$fs")
                        case $mtpt in
                                none|legacy|-) return 1
                                        ;;
                                *)      fs=$mtpt
                                        ;;
                        esac
                fi
        fi

        for mtpt in `share | awk '{print $2}'` ; do
                if [[ $mtpt == $fs ]] ; then
                        return 0
                fi
        done

        typeset stat=$(svcs -H -o STA nfs/server:default)
        if [[ $stat != "ON" ]]; then
                log_note "Current nfs/server status: $stat"
        fi

        return 1
}
#
# Given a mountpoint, determine if it is not shared.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
        is_shared $1
        if (($? == 0)); then
                return 1
        fi

        return 0
}
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
        typeset fs=$1

        is_shared $fs
        if (($? == 0)); then
                log_must zfs unshare $fs
        fi

        return 0
}
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
        #
        # Cannot share directory in non-global zone.
        #
        if ! is_global_zone; then
                log_note "Cannot trigger NFS server by sharing in LZ."
                return
        fi

        typeset nfs_fmri="svc:/network/nfs/server:default"
        if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
                #
                # Only a real share operation can bring the NFS server
                # online permanently.
                #
                typeset dummy=/tmp/dummy

                if [[ -d $dummy ]]; then
                        log_must rm -rf $dummy
                fi

                log_must mkdir $dummy
                log_must share $dummy

                #
                # Wait for the fmri to reach its final status; while in
                # transition an asterisk (*) is appended to the instance
                # status, and an unshare at that point would flip the
                # status back to 'DIS'. Wait at least 1 second.
                #
                log_must sleep 1
                typeset -i timeout=10
                while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
                do
                        sleep 1

                        ((timeout -= 1))
                done

                log_must unshare $dummy
                log_must rm -rf $dummy
        fi

        log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
#
# To verify whether the calling process is in the global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
        typeset cur_zone=$(zonename 2>/dev/null)
        if [[ $cur_zone != "global" ]]; then
                return 1
        fi
        return 0
}
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both" (no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
        typeset limit=$1

        [[ -z $limit ]] && return 0

        if is_global_zone ; then
                case $limit in
                        global|both)
                                ;;
                        local)  log_unsupported "Test is unable to run from "\
                                    "global zone."
                                ;;
                        *)      log_note "Warning: unknown limit $limit - " \
                                    "use both."
                                ;;
                esac
        else
                case $limit in
                        local|both)
                                ;;
                        global) log_unsupported "Test is unable to run from "\
                                    "local zone."
                                ;;
                        *)      log_note "Warning: unknown limit $limit - " \
                                    "use both."
                                ;;
                esac

                reexport_pool
        fi

        return 0
}
#
# Return 0 if created successfully or the pool already exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
#
function create_pool #pool devs_list
{
        typeset pool=${1%%/*}

        shift

        if [[ -z $pool ]]; then
                log_note "Missing pool name."
                return 1
        fi

        if poolexists $pool ; then
                destroy_pool $pool
        fi

        if is_global_zone ; then
                [[ -d /$pool ]] && rm -rf /$pool
                log_must zpool create -f $pool $@
        fi

        return 0
}
#
# Return 0 if the pool was destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
#
# Destroy pool with the given parameters.
#
function destroy_pool #pool
{
        typeset pool=${1%%/*}
        typeset mtpt

        if [[ -z $pool ]]; then
                log_note "No pool name given."
                return 1
        fi

        if is_global_zone ; then
                if poolexists "$pool" ; then
                        mtpt=$(get_prop mountpoint "$pool")

                        #
                        # At times, syseventd activity can cause attempts to
                        # destroy a pool to fail with EBUSY. We retry a few
                        # times allowing failures before requiring the destroy
                        # to succeed.
                        #
                        typeset -i wait_time=10 ret=1 count=0
                        must=""
                        while [[ $ret -ne 0 ]]; do
                                $must zpool destroy -f $pool
                                ret=$?
                                [[ $ret -eq 0 ]] && break
                                log_note "zpool destroy failed with $ret"
                                [[ count++ -ge 7 ]] && must=log_must
                                sleep $wait_time
                        done

                        [[ -d $mtpt ]] && \
                            log_must rm -rf $mtpt
                else
                        log_note "Pool does not exist. ($pool)"
                        return 1
                fi
        fi

        return 0
}
#
# First, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
        typeset zone_name=${1:-$(hostname)-z}
        typeset zone_root=${2:-"/zone_root"}
        typeset zone_ip=${3:-"10.1.1.10"}
        typeset prefix_ctr=$ZONE_CTR
        typeset pool_name=$ZONE_POOL
        typeset -i cntctr=5
        typeset -i i=0

        # Create a pool and 5 containers within it
        #
        [[ -d /$pool_name ]] && rm -rf /$pool_name
        log_must zpool create -f $pool_name $DISKS
        while ((i < cntctr)); do
                log_must zfs create $pool_name/$prefix_ctr$i
                ((i += 1))
        done

        # create a zvol
        log_must zfs create -V 1g $pool_name/zone_zvol

        #
        # If the current system supports slog, add a slog device to the pool
        #
        if verify_slog_support ; then
                typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
                log_must mkfile $MINVDEVSIZE $sdevs
                log_must zpool add $pool_name log mirror $sdevs
        fi

        # this isn't supported just yet.
        # Create a filesystem. In order to add this to
        # the zone, it must have its mountpoint set to 'legacy'
        # log_must zfs create $pool_name/zfs_filesystem
        # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
        [[ -d $zone_root ]] && \
            log_must rm -rf $zone_root/$zone_name
        [[ ! -d $zone_root ]] && \
            log_must mkdir -p -m 0700 $zone_root/$zone_name
        # Create the zone configuration file and configure the zone
        #
        typeset zone_conf=/tmp/zone_conf.$$
        echo "create" > $zone_conf
        echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
        echo "set autoboot=true" >> $zone_conf
        i=0
        while ((i < cntctr)); do
                echo "add dataset" >> $zone_conf
                echo "set name=$pool_name/$prefix_ctr$i" >> \
                    $zone_conf
                echo "end" >> $zone_conf
                ((i += 1))
        done

        # add our zvol to the zone
        echo "add device" >> $zone_conf
        echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
        echo "end" >> $zone_conf

        # add a corresponding zvol rdsk to the zone
        echo "add device" >> $zone_conf
        echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
        echo "end" >> $zone_conf

        # once it's supported, we'll add our filesystem to the zone
        # echo "add fs" >> $zone_conf
        # echo "set type=zfs" >> $zone_conf
        # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
        # echo "set dir=/export/zfs_filesystem" >> $zone_conf
        # echo "end" >> $zone_conf

        echo "verify" >> $zone_conf
        echo "commit" >> $zone_conf
        log_must zonecfg -z $zone_name -f $zone_conf
        log_must rm -f $zone_conf
        # Install the zone
        zoneadm -z $zone_name install
        if (($? == 0)); then
                log_note "SUCCESS: zoneadm -z $zone_name install"
        else
                log_fail "FAIL: zoneadm -z $zone_name install"
        fi
        # Install sysidcfg file
        #
        typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
        echo "system_locale=C" > $sysidcfg
        echo "terminal=dtterm" >> $sysidcfg
        echo "network_interface=primary {" >> $sysidcfg
        echo "hostname=$zone_name" >> $sysidcfg
        echo "}" >> $sysidcfg
        echo "name_service=NONE" >> $sysidcfg
        echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
        echo "security_policy=NONE" >> $sysidcfg
        echo "timezone=US/Eastern" >> $sysidcfg

        # Boot this zone
        log_must zoneadm -z $zone_name boot
}
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
        typeset -i cntctr=5
        typeset -i i=0

        while ((i < cntctr)); do
                if ((i == 0)); then
                        TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
                        if ! ismounted $TESTPOOL; then
                                log_must zfs mount $TESTPOOL
                        fi
                else
                        eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
                        if eval ! ismounted \$TESTPOOL$i; then
                                log_must eval zfs mount \$TESTPOOL$i
                        fi
                fi
                ((i += 1))
        done
}
#
# Verify a given disk is online or offline
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline}
{
        typeset pool=$1
        typeset disk=${2#/dev/dsk/}
        typeset state=$3

        zpool status -v $pool | grep "$disk" \
            | grep -i "$state" > /dev/null 2>&1

        return $?
}
#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
        typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

        if [[ $dataset != *@* ]]; then
                log_fail "Invalid snapshot name: '$dataset'."
        fi

        typeset fs=${dataset%@*}
        typeset snap=${dataset#*@}

        if [[ -z $fs || -z $snap ]]; then
                log_fail "Invalid snapshot name: '$dataset'."
        fi

        echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
        typeset pool="$1"
        typeset filesys="$2"
        typeset zdbout="/tmp/zdbout.$$"

        shift
        shift
        typeset dirs=$@
        typeset search_path=""

        log_note "Calling zdb to verify filesystem '$filesys'"
        zfs unmount -a > /dev/null 2>&1
        log_must zpool export $pool

        if [[ -n $dirs ]] ; then
                for dir in $dirs ; do
                        search_path="$search_path -d $dir"
                done
        fi

        log_must zpool import $search_path $pool

        zdb -cudi $filesys > $zdbout 2>&1
        if [[ $? != 0 ]]; then
                log_note "Output: zdb -cudi $filesys"
                cat $zdbout
                log_fail "zdb detected errors with: '$filesys'"
        fi

        log_must zfs mount -a
        log_must rm -rf $zdbout
}
#
# Given a pool, this function lists all disks in the pool
#
function get_disklist # pool
{
        typeset disklist=""

        disklist=$(zpool iostat -v $1 | nawk '(NR > 4) {print $1}' | \
            grep -v "\-\-\-\-\-" | \
            egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")

        echo $disklist
}
#
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want: we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
#
function stress_timeout
{
        typeset -i TIMEOUT=$1
        shift
        typeset cpids="$@"

        log_note "Waiting for child processes ($cpids). " \
            "It could last dozens of minutes, please be patient ..."
        log_must sleep $TIMEOUT

        log_note "Killing child processes after ${TIMEOUT} stress timeout."
        typeset pid
        for pid in $cpids; do
                ps -p $pid > /dev/null 2>&1
                if (($? == 0)); then
                        log_must kill -USR1 $pid
                fi
        done
}
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
        typeset pool=$1
        typeset disk=${2#/dev/dsk/}
        typeset state=$3

        cur_state=$(get_device_state $pool $disk "spares")

        if [[ $state != ${cur_state} ]]; then
                return 1
        fi
        return 0
}
#
# Verify a given slog disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
        typeset pool=$1
        typeset disk=${2#/dev/dsk/}
        typeset state=$3

        cur_state=$(get_device_state $pool $disk "logs")

        if [[ $state != ${cur_state} ]]; then
                return 1
        fi
        return 0
}
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
        typeset pool=$1
        typeset disk=${2#/dev/dsk/}
        typeset state=$3

        cur_state=$(get_device_state $pool $disk)

        if [[ $state != ${cur_state} ]]; then
                return 1
        fi
        return 0
}
#
# Check the output of 'zpool status -v <pool>' to see if the content of
# <token> contains the <keyword> specified.
#
# Return 0 if it contains, 1 otherwise
#
function check_pool_status # pool token keyword
{
        typeset pool=$1
        typeset token=$2
        typeset keyword=$3

        zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
                ($1==token) {print $0}' \
            | grep -i "$keyword" > /dev/null 2>&1

        return $?
}
#
# The following 5 functions are instances of check_pool_status():
#       is_pool_resilvering - check if the pool resilver is in progress
#       is_pool_resilvered - check if the pool resilver is completed
#       is_pool_scrubbing - check if the pool scrub is in progress
#       is_pool_scrubbed - check if the pool scrub is completed
#       is_pool_scrub_stopped - check if the pool scrub is stopped
#
function is_pool_resilvering #pool
{
        check_pool_status "$1" "scan" "resilver in progress since "
        return $?
}

function is_pool_resilvered #pool
{
        check_pool_status "$1" "scan" "resilvered "
        return $?
}

function is_pool_scrubbing #pool
{
        check_pool_status "$1" "scan" "scrub in progress since "
        return $?
}

function is_pool_scrubbed #pool
{
        check_pool_status "$1" "scan" "scrub repaired"
        return $?
}

function is_pool_scrub_stopped #pool
{
        check_pool_status "$1" "scan" "scrub canceled"
        return $?
}
#
# Use create_pool()/destroy_pool() to clean up the information in
# the given disk to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
        typeset pool="foopool$$"

        if poolexists $pool ; then
                destroy_pool $pool
        fi

        create_pool $pool $@
        destroy_pool $pool

        return 0
}
#
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#
function find_disks
{
        sfi=/tmp/swaplist.$$
        dmpi=/tmp/dumpdev.$$
        max_finddisksnum=${MAX_FINDDISKSNUM:-6}

        swap -l > $sfi
        dumpadm > $dmpi 2>/dev/null
        # write an awk script that can process the output of format
        # to produce a list of disks we know about. Note that we have
        # to escape "$2" so that the shell doesn't interpret it while
        # we're creating the awk script.
        # -------------------
        cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
        BEGIN { FS="."; }

        /^Specify disk/{
                searchdisks=0;
        }

        {
                if (searchdisks && \$2 !~ "^$"){
                        split(\$2,arr," ");
                        print arr[1];
                }
        }

        /^AVAILABLE DISK SELECTIONS:/{
                searchdisks=1;
        }
EOF
        #---------------------

        chmod 755 /tmp/find_disks.awk
        disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
        rm /tmp/find_disks.awk
        unused=""
        for disk in $disks; do
        # Check for mounted
                grep "${disk}[sp]" /etc/mnttab >/dev/null
                (($? == 0)) && continue
        # Check for swap
                grep "${disk}[sp]" $sfi >/dev/null
                (($? == 0)) && continue
        # check for dump device
                grep "${disk}[sp]" $dmpi >/dev/null
                (($? == 0)) && continue
        # check to see if this disk hasn't been explicitly excluded
        # by a user-set environment variable
                echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
                (($? == 0)) && continue
                unused_candidates="$unused_candidates $disk"
        done
        rm $sfi
        rm $dmpi
        # now just check to see if those disks do actually exist
        # by looking for a device pointing to the first slice in
        # each case. limit the number to max_finddisksnum
        count=0
        for disk in $unused_candidates; do
                if [ -b /dev/dsk/${disk}s0 ]; then
                        if [ $count -lt $max_finddisksnum ]; then
                                unused="$unused $disk"
                                # do not impose limit if $@ is provided
                                [[ -z $@ ]] && ((count = count + 1))
                        fi
                fi
        done

        # finally, return our disk list
        echo $unused
}
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
        typeset gname=$1
        typeset uname=$2
        typeset basedir=${3:-"/var/tmp"}

        if ((${#gname} == 0 || ${#uname} == 0)); then
                log_fail "group name or user name are not defined."
        fi

        log_must useradd -g $gname -d $basedir/$uname -m $uname

        return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
        typeset user=$1
        typeset basedir=${2:-"/var/tmp"}

        if ((${#user} == 0)); then
                log_fail "login name is necessary."
        fi

        if id $user > /dev/null 2>&1; then
                log_must userdel $user
        fi

        [[ -d $basedir/$user ]] && rm -fr $basedir/$user

        return 0
}
#
# Select a valid gid and create the specified group.
#
# $1 group name
#
function add_group #<group_name>
{
        typeset group=$1

        if ((${#group} == 0)); then
                log_fail "group name is necessary."
        fi

        # Assign 100 as the base gid
        typeset -i gid=100
        while true; do
                groupadd -g $gid $group > /dev/null 2>&1
                typeset -i ret=$?
                case $ret in
                        0) return 0 ;;
                        # The gid is not unique
                        4) ((gid += 1)) ;;
                        *) return 1 ;;
                esac
        done
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
        typeset grp=$1
        if ((${#grp} == 0)); then
                log_fail "group name is necessary."
        fi

        groupmod -n $grp $grp > /dev/null 2>&1
        typeset -i ret=$?
        case $ret in
                # Group does not exist.
                6) return 0 ;;
                # Name already exists as a group name
                9) log_must groupdel $grp ;;
                *) return 1 ;;
        esac

        return 0
}
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name

        typeset pool=""
        typeset DONT_DESTROY=""

        # We check that by deleting the $1 pool, we're not
        # going to pull the rug out from other pools. Do this
        # by looking at all other pools, ensuring that they
        # aren't built from files or zvols contained in this pool.

        for pool in $(zpool list -H -o name)
        do
                ALTMOUNTPOOL=""

                # this is a list of the top-level directories in each of the
                # files that make up the path to the files the pool is based on
                FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
                    awk '{print $1}')

                # this is a list of the zvols that make up the pool
                ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
                    | awk '{print $1}')

                # also want to determine if it's a file-based pool using an
                # alternate mountpoint...
                POOL_FILE_DIRS=$(zpool status -v $pool | \
                    grep / | awk '{print $1}' | \
                    awk -F/ '{print $2}' | grep -v "dev")

                for pooldir in $POOL_FILE_DIRS
                do
                        OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
                            grep "${pooldir}$" | awk '{print $1}')

                        ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
                done

                if [ ! -z "$ZVOLPOOL" ]
                then
                        DONT_DESTROY="true"
                        log_note "Pool $pool is built from $ZVOLPOOL on $1"
                fi

                if [ ! -z "$FILEPOOL" ]
                then
                        DONT_DESTROY="true"
                        log_note "Pool $pool is built from $FILEPOOL on $1"
                fi

                if [ ! -z "$ALTMOUNTPOOL" ]
                then
                        DONT_DESTROY="true"
                        log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
                fi
        done

        if [ -z "${DONT_DESTROY}" ]
        then
                return 0
        else
                log_note "Warning: it is not safe to destroy $1!"
                return 1
        fi
}
#
# Get the available ZFS compression options
# $1 option type zfs_set|zfs_compress
#
function get_compress_opts
{
        typeset COMPRESS_OPTS
        typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
            gzip-6 gzip-7 gzip-8 gzip-9"

        if [[ $1 == "zfs_compress" ]] ; then
                COMPRESS_OPTS="on lzjb"
        elif [[ $1 == "zfs_set" ]] ; then
                COMPRESS_OPTS="on off lzjb"
        fi
        typeset valid_opts="$COMPRESS_OPTS"
        zfs get 2>&1 | grep gzip >/dev/null 2>&1
        if [[ $? -eq 0 ]]; then
                valid_opts="$valid_opts $GZIP_OPTS"
        fi
        echo "$valid_opts"
}
#
# Verify that zfs operations with the -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
        typeset ops=$1
        typeset datatype=$2
        typeset dataset=$3
        typeset newdataset=$4

        if [[ $datatype != "fs" && $datatype != "vol" ]]; then
                log_fail "$datatype is not supported."
        fi

        # check parameters accordingly
        case $ops in
                create)
                        newdataset=$dataset
                        dataset=""
                        if [[ $datatype == "vol" ]]; then
                                ops="create -V $VOLSIZE"
                        fi
                        ;;
                clone)
                        if [[ -z $newdataset ]]; then
                                log_fail "newdataset should not be empty" \
                                    "when ops is $ops."
                        fi
                        log_must datasetexists $dataset
                        log_must snapexists $dataset
                        ;;
                rename)
                        if [[ -z $newdataset ]]; then
                                log_fail "newdataset should not be empty" \
                                    "when ops is $ops."
                        fi
                        log_must datasetexists $dataset
                        log_mustnot snapexists $dataset
                        ;;
                *)
                        log_fail "$ops is not supported."
                        ;;
        esac

        # make sure the upper level filesystem does not exist
        if datasetexists ${newdataset%/*} ; then
                log_must zfs destroy -rRf ${newdataset%/*}
        fi

        # without -p option, operation will fail
        log_mustnot zfs $ops $dataset $newdataset
        log_mustnot datasetexists $newdataset ${newdataset%/*}

        # with -p option, operation should succeed
        log_must zfs $ops -p $dataset $newdataset
        if ! datasetexists $newdataset ; then
                log_fail "-p option does not work for $ops"
        fi

        # when $ops is create or clone, redoing the operation still returns zero
        if [[ $ops != "rename" ]]; then
                log_must zfs $ops -p $dataset $newdataset
        fi

        return 0
}
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
        typeset pool=$1
        typeset config=$2
        typeset alt_root

        if ! poolexists "$pool" ; then
                return 1
        fi
        alt_root=$(zpool list -H $pool | awk '{print $NF}')
        if [[ $alt_root == "-" ]]; then
                value=$(zdb -C $pool | grep "$config:" | awk -F: \
                    '{print $2}')
        else
                value=$(zdb -e $pool | grep "$config:" | awk -F: \
                    '{print $2}')
        fi
        if [[ -n $value ]] ; then
                # strip any surrounding single quotes
                value=${value#\'}
                value=${value%\'}
        fi
        echo $value

        return 0
}
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n items
#
function _random_get
{
        typeset cnt=$1
        shift

        typeset str="$@"
        typeset -i ind
        ((ind = RANDOM % cnt + 1))

        typeset ret=$(echo "$str" | cut -f $ind -d ' ')
        echo $ret
}

#
# Randomly select one item from the arguments; the result may also be
# empty (the "NONE" case).
#
function random_get_with_non
{
        typeset -i cnt=$#
        ((cnt += 1))

        _random_get "$cnt" "$@"
}

#
# Randomly select one item from the arguments; "NONE" is never returned.
#
function random_get
{
        _random_get "$#" "$@"
}
#
# Detect if the current system supports slog
#
function verify_slog_support
{
        typeset dir=/tmp/disk.$$
        typeset pool=foo.$$
        typeset vdev=$dir/a
        typeset sdev=$dir/b
        typeset -i ret=0

        mkdir -p $dir
        mkfile $MINVDEVSIZE $vdev $sdev

        if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
                ret=1
        fi
        rm -rf $dir

        return $ret
}
#
# The function will generate a dataset name with specific length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
        typeset -i len=$1
        typeset basestr="$2"
        typeset -i baselen=${#basestr}
        typeset -i iter=0
        typeset l_name=""

        if ((len % baselen == 0)); then
                ((iter = len / baselen))
        else
                ((iter = len / baselen + 1))
        fi
        while ((iter > 0)); do
                l_name="${l_name}$basestr"

                ((iter -= 1))
        done

        echo $l_name
}
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
        typeset cksum
        sync
        cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
            | awk -F= '{print $7}')
        echo $cksum
}

#
# Get the cksum of a file
# $1 file path
#
function checksum
{
        typeset cksum
        cksum=$(cksum $1 | awk '{print $1}')
        echo $cksum
}
#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
        typeset pool=$1
        typeset disk=${2#/dev/dsk/}
        typeset field=${3:-$pool}

        state=$(zpool status -v "$pool" 2>/dev/null | \
            nawk -v device=$disk -v pool=$pool -v field=$field \
            'BEGIN {startconfig=0; startfield=0; }
            /config:/ {startconfig=1}
            (startconfig==1) && ($1==field) {startfield=1; next;}
            (startfield==1) && ($1==device) {print $2; exit;}
            (startfield==1) &&
            ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
        echo $state
}
#
# print the given directory filesystem type
#
# $1 directory name
#
function get_fstype
{
        typeset dir=$1

        if [[ -z $dir ]]; then
                log_fail "Usage: get_fstype <directory>"
        fi

        #
        # $ df -n /
        # /               : ufs
        #
        df -n $dir | awk '{print $3}'
}
#
# Given a disk, label it to VTOC regardless of what label was on the disk
# $1 disk
#
function labelvtoc
{
        typeset disk=$1
        if [[ -z $disk ]]; then
                log_fail "The disk name is unspecified."
        fi
        typeset label_file=/var/tmp/labelvtoc.$$
        typeset arch=$(uname -p)

        if [[ $arch == "i386" ]]; then
                echo "label" > $label_file
                echo "0" >> $label_file
                echo "" >> $label_file
                echo "q" >> $label_file
                echo "q" >> $label_file

                fdisk -B $disk >/dev/null 2>&1
                # wait a while for fdisk to finish
                sleep 60
        elif [[ $arch == "sparc" ]]; then
                echo "label" > $label_file
                echo "0" >> $label_file
                echo "" >> $label_file
                echo "" >> $label_file
                echo "" >> $label_file
                echo "q" >> $label_file
        else
                log_fail "unknown arch type"
        fi

        format -e -s -d $disk -f $label_file
        typeset -i ret_val=$?
        rm -f $label_file
        #
        # wait for format to finish
        #
        sleep 60
        if ((ret_val != 0)); then
                log_fail "unable to label $disk as VTOC."
        fi

        return 0
}
#
# check if the system was installed as zfsroot or not
# return: 0 if zfsroot, non-zero if not
#
function is_zfsroot
{
        df -n / | grep zfs > /dev/null 2>&1
        return $?
}
#
# get the root filesystem name if it's a zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
        typeset rootfs=""
        rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
            /etc/mnttab)
        if [[ -z "$rootfs" ]]; then
                log_fail "Can not get rootfs"
        fi
        zfs list $rootfs > /dev/null 2>&1
        if (($? == 0)); then
                echo $rootfs
        else
                log_fail "This is not a zfsroot system."
        fi
}
#
# get the rootfs's pool name
# return:
#       rootpool name
#
function get_rootpool
{
        typeset rootfs=""
        typeset rootpool=""
        rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
            /etc/mnttab)
        if [[ -z "$rootfs" ]]; then
                log_fail "Can not get rootpool"
        fi
        zfs list $rootfs > /dev/null 2>&1
        if (($? == 0)); then
                rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
                echo $rootpool
        else
                log_fail "This is not a zfsroot system."
        fi
}
#
# Check if the given device is a physical device
#
function is_physical_device #device
{
        typeset device=${1#/dev/dsk/}
        device=${device#/dev/rdsk/}

        echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
        return $?
}
#
# Get the directory path of given device
#
function get_device_dir #device
{
        typeset device=$1

        if ! $(is_physical_device $device) ; then
                if [[ $device != "/"* ]]; then
                        device="/dev/dsk/$device"
                fi
                device=${device%/*}
                echo $device
        else
                echo "/dev/dsk"
        fi
}
#
# Get the package name
#
function get_package_name
{
        typeset dirpath=${1:-$STC_NAME}

        echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
}
#
# Get the word count of a string separated by white space
#
function get_word_count
{
        echo $1 | wc -w
}
#
# Verify that the required number of disks is given
#
function verify_disk_count
{
        typeset -i min=${2:-1}

        typeset -i count=$(get_word_count "$1")

        if ((count < min)); then
                log_untested "A minimum of $min disks is required to run." \
                    " You specified $count disk(s)"
        fi
}
function ds_is_volume
{
        typeset type=$(get_prop type $1)
        [[ $type = "volume" ]] && return 0
        return 1
}

function ds_is_filesystem
{
        typeset type=$(get_prop type $1)
        [[ $type = "filesystem" ]] && return 0
        return 1
}

function ds_is_snapshot
{
        typeset type=$(get_prop type $1)
        [[ $type = "snapshot" ]] && return 0
        return 1
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
        svcs -H -o state labeld 2>/dev/null | grep "enabled"
        if (($? != 0)); then
                return 1
        else
                return 0
        fi
}
# Utility function to determine if a system has multiple cpus.
function is_mp
{
        (($(psrinfo | wc -l) > 1))
}

function get_cpu_freq
{
        psrinfo -v 0 | awk '/processor operates at/ {print $6}'
}
# Run the given command as the user provided.
function user_run
{
        typeset user=$1
        shift

        eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
        return $?
}
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
        typeset pool=$1
        typeset vdev

        if [[ -z $pool ]]; then
                log_note "Missing pool name."
                return 2
        fi

        shift

        typeset tmpfile=$(mktemp)
        zpool list -Hv "$pool" >$tmpfile
        for vdev in $@; do
                grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
                [[ $? -ne 0 ]] && return 1
        done

        rm -f $tmpfile

        return 0
}
# Return the largest of the arguments.
function get_max
{
        typeset -l i max=$1
        shift

        for i in "$@"; do
                max=$(echo $((max > i ? max : i)))
        done

        echo $max
}

# Return the smallest of the arguments.
function get_min
{
        typeset -l i min=$1
        shift

        for i in "$@"; do
                min=$(echo $((min < i ? min : i)))
        done

        echo $min
}
#
# Generate a random number between 1 and the argument.
#
function random
{
        typeset max=$1
        echo $(( ($RANDOM % $max) + 1 ))
}
# Write data that can be compressed into a directory
function write_compressible
{
        typeset dir=$1
        typeset megs=$2
        typeset nfiles=${3:-1}
        typeset bs=${4:-1024k}
        typeset fname=${5:-file}

        [[ -d $dir ]] || log_fail "No directory: $dir"

        log_must eval "fio \
            --name=job \
            --fallocate=0 \
            --minimal \
            --randrepeat=0 \
            --buffer_compress_percentage=66 \
            --buffer_compress_chunk=4096 \
            --directory=$dir \
            --numjobs=$nfiles \
            --rw=write \
            --bs=$bs \
            --filesize=$megs \
            --filename_format='$fname.\$jobnum' >/dev/null"
}
# Get the object number of a file or directory.
function get_objnum
{
        typeset pathname=$1
        typeset objnum

        [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
        objnum=$(stat -c %i $pathname)
        echo $objnum
}