 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright (c) 2016, Intel Corporation.
 */

/*
 * Pool import support functions.
 *
 * Used by zpool, ztest, zdb, and zhack to locate importable configs. Since
 * these commands are expected to run in the global zone, we can assume
 * that the devices are all readable when called.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device. If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded. Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev. We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed. Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
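
/*
 * Illustrative sketch only (not part of the build): the de-duplication
 * described above can be thought of as keying every label that is read by
 * the tuple below and keeping the best txg per (pool, toplevel vdev). The
 * struct name and fields here are hypothetical; the real bookkeeping lives
 * in the common zutil_import.c code.
 *
 *	struct label_key {
 *		uint64_t pool_guid;	// which pool the label belongs to
 *		uint64_t top_guid;	// which toplevel vdev it describes
 *		uint64_t txg;		// how current the label is
 *	};
 */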

#include <sys/dktp/fdisk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>

#include <thread_pool.h>

#include <libnvpair.h>

#include "zutil_import.h"

#include <blkid/blkid.h>

#define	DEV_BYID_PATH	"/dev/disk/by-id/"

/*
 * Skip devices with well known prefixes:
 * there can be side effects when opening devices which need to be avoided.
 *
 * hpet        - High Precision Event Timer
 * watchdog[N] - Watchdog must be closed in a special way.
 */
static boolean_t
should_skip_dev(const char *dev)
{
	return ((strcmp(dev, "watchdog") == 0) ||
	    (strncmp(dev, "watchdog", 8) == 0 && isdigit(dev[8])) ||
	    (strcmp(dev, "hpet") == 0));
}

int
zfs_dev_flush(int fd)
{
	return (ioctl(fd, BLKFLSBUF));
}

static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	libpc_handle_t *hdl = rn->rn_hdl;
	struct stat64 statbuf;
	nvlist_t *config;
	uint64_t vdev_guid = 0;
	int error;
	int num_labels = 0;
	int fd;

	if (should_skip_dev(zfs_basename(rn->rn_name)))
		return;

	/*
	 * Ignore failed stats. We only want regular files and block devices.
	 * Ignore files that are too small to hold a zpool.
	 */
	if (stat64(rn->rn_name, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) && !S_ISBLK(statbuf.st_mode)) ||
	    (S_ISREG(statbuf.st_mode) && statbuf.st_size < SPA_MINDEVSIZE))
		return;

	/*
	 * Preferentially open using O_DIRECT to bypass the block device
	 * cache which may be stale for multipath devices. An EINVAL errno
	 * indicates O_DIRECT is unsupported so fall back to just O_RDONLY.
	 */
	fd = open(rn->rn_name, O_RDONLY | O_DIRECT | O_CLOEXEC);
	if ((fd < 0) && (errno == EINVAL))
		fd = open(rn->rn_name, O_RDONLY | O_CLOEXEC);
	if ((fd < 0) && (errno == EACCES))
		hdl->lpc_open_access_error = B_TRUE;
	if (fd < 0)
		return;

	error = zpool_read_label(fd, &config, &num_labels);
	if (error != 0) {
		(void) close(fd);
		return;
	}

	if (num_labels == 0) {
		(void) close(fd);
		nvlist_free(config);
		return;
	}

	/*
	 * Check that the vdev is for the expected guid. Additional entries
	 * are speculatively added based on the paths stored in the labels.
	 * Entries with valid paths but incorrect guids must be removed.
	 */
	error = nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid);
	if (error || (rn->rn_vdev_guid && rn->rn_vdev_guid != vdev_guid)) {
		(void) close(fd);
		nvlist_free(config);
		return;
	}

	(void) close(fd);

	rn->rn_config = config;
	rn->rn_num_labels = num_labels;

	/*
	 * Add additional entries for paths described by this label.
	 */
	if (rn->rn_labelpaths) {
		const char *path = NULL;
		const char *devid = NULL;
		rdsk_node_t *slice;
		avl_index_t where;

		if (label_paths(rn->rn_hdl, rn->rn_config, &path, &devid))
			return;

		/*
		 * Allow devlinks to stabilize so all paths are available.
		 */
		zpool_disk_wait(rn->rn_name);

		if (path != NULL) {
			slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zutil_strdup(hdl, path);
			slice->rn_vdev_guid = vdev_guid;
			slice->rn_avl = rn->rn_avl;
			slice->rn_hdl = hdl;
			slice->rn_lock = rn->rn_lock;
			slice->rn_order = IMPORT_ORDER_PREFERRED_1;
			slice->rn_labelpaths = B_FALSE;
			pthread_mutex_lock(rn->rn_lock);
			if (avl_find(rn->rn_avl, slice, &where)) {
				pthread_mutex_unlock(rn->rn_lock);
				free(slice->rn_name);
				free(slice);
			} else {
				avl_insert(rn->rn_avl, slice, where);
				pthread_mutex_unlock(rn->rn_lock);
				zpool_open_func(slice);
			}
		}

		if (devid != NULL) {
			slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
			error = asprintf(&slice->rn_name, "%s%s",
			    DEV_BYID_PATH, devid);
			if (error == -1) {
				free(slice);
				return;
			}
			slice->rn_vdev_guid = vdev_guid;
			slice->rn_avl = rn->rn_avl;
			slice->rn_hdl = hdl;
			slice->rn_lock = rn->rn_lock;
			slice->rn_order = IMPORT_ORDER_PREFERRED_2;
			slice->rn_labelpaths = B_FALSE;
			pthread_mutex_lock(rn->rn_lock);
			if (avl_find(rn->rn_avl, slice, &where)) {
				pthread_mutex_unlock(rn->rn_lock);
				free(slice->rn_name);
				free(slice);
			} else {
				avl_insert(rn->rn_avl, slice, where);
				pthread_mutex_unlock(rn->rn_lock);
				zpool_open_func(slice);
			}
		}
	}
}

static const char * const
zpool_default_import_path[] = {
	"/dev/disk/by-vdev",	/* Custom rules, use first if they exist */
	"/dev/mapper",		/* Use multipath devices before components */
	"/dev/disk/by-partlabel", /* Single unique entry set by user */
	"/dev/disk/by-partuuid", /* Generated partition uuid */
	"/dev/disk/by-label",	/* Custom persistent labels */
	"/dev/disk/by-uuid",	/* Single unique entry and persistent */
	"/dev/disk/by-id",	/* May be multiple entries and persistent */
	"/dev/disk/by-path",	/* Encodes physical location and persistent */
	"/dev"			/* UNSAFE device names will change */
};

const char * const *
zpool_default_search_paths(size_t *count)
{
	*count = ARRAY_SIZE(zpool_default_import_path);
	return (zpool_default_import_path);
}
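
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	size_t count;
 *	const char * const *dirs = zpool_default_search_paths(&count);
 *	for (size_t i = 0; i < count; i++)
 *		(void) printf("search dir %zu: %s\n", i, dirs[i]);
 *
 * Import scanning walks these directories in the order listed above, which
 * is why the more stable device namespaces appear first in the table.
 */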

/*
 * Given a full path to a device, determine whether that device appears in
 * the import search path. If it does, return the first match and store the
 * index in the passed 'order' variable; otherwise return an error.
 */
static int
zfs_path_order(const char *name, int *order)
{
	const char *env = getenv("ZPOOL_IMPORT_PATH");

	if (env) {
		for (int i = 0; ; ++i) {
			env += strspn(env, ":");
			size_t dirlen = strcspn(env, ":");
			if (dirlen) {
				if (strncmp(name, env, dirlen) == 0) {
					*order = i;
					return (0);
				}
				env += dirlen;
			} else {
				break;
			}
		}
	} else {
		for (int i = 0; i < ARRAY_SIZE(zpool_default_import_path);
		    i++) {
			if (strncmp(name, zpool_default_import_path[i],
			    strlen(zpool_default_import_path[i])) == 0) {
				*order = i;
				return (0);
			}
		}
	}

	return (ENOENT);
}
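
/*
 * Illustrative sketch (hypothetical values, not part of the build): with the
 * default table above and no ZPOOL_IMPORT_PATH override, a multipath node
 * resolves to the second entry:
 *
 *	int order;
 *	if (zfs_path_order("/dev/mapper/mpatha", &order) == 0) {
 *		// order is now 1, since "/dev/mapper" is index 1 above
 *	}
 *
 * Lower order values win when the same vdev is discovered through several
 * equivalent paths during scanning.
 */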

/*
 * Use libblkid to quickly enumerate all known zfs devices.
 */
int
zpool_find_import_blkid(libpc_handle_t *hdl, pthread_mutex_t *lock,
    avl_tree_t **slice_cache)
{
	rdsk_node_t *slice;
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;
	avl_index_t where;
	int error;

	error = blkid_get_cache(&cache, NULL);
	if (error != 0)
		return (error);

	error = blkid_probe_all_new(cache);
	if (error != 0) {
		blkid_put_cache(cache);
		return (error);
	}

	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		blkid_put_cache(cache);
		return (EINVAL);
	}

	/* Only const char *s since 2.32 */
	error = blkid_dev_set_search(iter,
	    (char *)"TYPE", (char *)"zfs_member");
	if (error != 0) {
		blkid_dev_iterate_end(iter);
		blkid_put_cache(cache);
		return (error);
	}

	*slice_cache = zutil_alloc(hdl, sizeof (avl_tree_t));
	avl_create(*slice_cache, slice_cache_compare, sizeof (rdsk_node_t),
	    offsetof(rdsk_node_t, rn_node));

	while (blkid_dev_next(iter, &dev) == 0) {
		slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
		slice->rn_name = zutil_strdup(hdl, blkid_dev_devname(dev));
		slice->rn_vdev_guid = 0;
		slice->rn_lock = lock;
		slice->rn_avl = *slice_cache;
		slice->rn_hdl = hdl;
		slice->rn_labelpaths = B_TRUE;

		error = zfs_path_order(slice->rn_name, &slice->rn_order);
		if (error == 0)
			slice->rn_order += IMPORT_ORDER_SCAN_OFFSET;
		else
			slice->rn_order = IMPORT_ORDER_DEFAULT;

		pthread_mutex_lock(lock);
		if (avl_find(*slice_cache, slice, &where)) {
			free(slice->rn_name);
			free(slice);
		} else {
			avl_insert(*slice_cache, slice, where);
		}
		pthread_mutex_unlock(lock);
	}

	blkid_dev_iterate_end(iter);
	blkid_put_cache(cache);

	return (0);
}

/*
 * Linux persistent device strings for vdev labels
 *
 * based on libudev for consistency with libudev disk add/remove events
 */

typedef struct vdev_dev_strs {
	char vds_devid[128];
	char vds_devphys[128];
} vdev_dev_strs_t;

#ifdef HAVE_LIBUDEV

/*
 * Obtain the persistent device id string (describes what)
 *
 * used by ZED vdev matching for auto-{online,expand,replace}
 */
int
zfs_device_get_devid(struct udev_device *dev, char *bufptr, size_t buflen)
{
	struct udev_list_entry *entry;
	const char *bus;
	char devbyid[MAXPATHLEN];

	/* The bus based by-id path is preferred */
	bus = udev_device_get_property_value(dev, "ID_BUS");

	if (bus == NULL) {
		const char *dm_uuid;

		/*
		 * For multipath nodes use the persistent uuid based identifier
		 *
		 * Example: /dev/disk/by-id/dm-uuid-mpath-35000c5006304de3f
		 */
		dm_uuid = udev_device_get_property_value(dev, "DM_UUID");
		if (dm_uuid != NULL) {
			(void) snprintf(bufptr, buflen, "dm-uuid-%s", dm_uuid);
			return (0);
		}

		/*
		 * For volumes use the persistent /dev/zvol/dataset identifier
		 */
		entry = udev_device_get_devlinks_list_entry(dev);
		while (entry != NULL) {
			const char *name;

			name = udev_list_entry_get_name(entry);
			if (strncmp(name, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0) {
				(void) strlcpy(bufptr, name, buflen);
				return (0);
			}
			entry = udev_list_entry_get_next(entry);
		}

		/*
		 * NVME 'by-id' symlinks are similar to bus case
		 */
		struct udev_device *parent;

		parent = udev_device_get_parent_with_subsystem_devtype(dev,
		    "nvme", NULL);
		if (parent != NULL)
			bus = "nvme";	/* continue with bus symlink search */
		else
			return (ENODATA);
	}

	/*
	 * locate the bus specific by-id link
	 */
	(void) snprintf(devbyid, sizeof (devbyid), "%s%s-", DEV_BYID_PATH, bus);
	entry = udev_device_get_devlinks_list_entry(dev);
	while (entry != NULL) {
		const char *name;

		name = udev_list_entry_get_name(entry);
		if (strncmp(name, devbyid, strlen(devbyid)) == 0) {
			name += strlen(DEV_BYID_PATH);
			(void) strlcpy(bufptr, name, buflen);
			return (0);
		}
		entry = udev_list_entry_get_next(entry);
	}

	return (ENODATA);
}

/*
 * Obtain the persistent physical location string (describes where)
 *
 * used by ZED vdev matching for auto-{online,expand,replace}
 */
int
zfs_device_get_physical(struct udev_device *dev, char *bufptr, size_t buflen)
{
	const char *physpath = NULL;
	struct udev_list_entry *entry;

	/*
	 * Normal disks use ID_PATH for their physical path.
	 */
	physpath = udev_device_get_property_value(dev, "ID_PATH");
	if (physpath != NULL && strlen(physpath) > 0) {
		(void) strlcpy(bufptr, physpath, buflen);
		return (0);
	}

	/*
	 * Device mapper devices are virtual and don't have a physical
	 * path. For them we use ID_VDEV instead, which is set up via the
	 * /etc/vdev_id.conf file. ID_VDEV provides a persistent path
	 * to a virtual device. If you don't have vdev_id.conf set up,
	 * you cannot use multipath autoreplace with device mapper.
	 */
	physpath = udev_device_get_property_value(dev, "ID_VDEV");
	if (physpath != NULL && strlen(physpath) > 0) {
		(void) strlcpy(bufptr, physpath, buflen);
		return (0);
	}

	/*
	 * For ZFS volumes use the persistent /dev/zvol/dataset identifier
	 */
	entry = udev_device_get_devlinks_list_entry(dev);
	while (entry != NULL) {
		physpath = udev_list_entry_get_name(entry);
		if (strncmp(physpath, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0) {
			(void) strlcpy(bufptr, physpath, buflen);
			return (0);
		}
		entry = udev_list_entry_get_next(entry);
	}

	/*
	 * For all other devices fall back to using the by-uuid name.
	 */
	entry = udev_device_get_devlinks_list_entry(dev);
	while (entry != NULL) {
		physpath = udev_list_entry_get_name(entry);
		if (strncmp(physpath, "/dev/disk/by-uuid", 17) == 0) {
			(void) strlcpy(bufptr, physpath, buflen);
			return (0);
		}
		entry = udev_list_entry_get_next(entry);
	}

	return (ENODATA);
}

/*
 * A disk is considered a multipath whole disk when:
 *	DEVNAME key value has "dm-"
 *	DM_NAME key value has "mpath" prefix
 *	DM_UUID exists
 *	ID_PART_TABLE_TYPE key does not exist or is not gpt
 */
static boolean_t
udev_mpath_whole_disk(struct udev_device *dev)
{
	const char *devname, *type, *uuid;

	devname = udev_device_get_property_value(dev, "DEVNAME");
	type = udev_device_get_property_value(dev, "ID_PART_TABLE_TYPE");
	uuid = udev_device_get_property_value(dev, "DM_UUID");

	if ((devname != NULL && strncmp(devname, "/dev/dm-", 8) == 0) &&
	    ((type == NULL) || (strcmp(type, "gpt") != 0)) &&
	    (uuid != NULL)) {
		return (B_TRUE);
	}

	return (B_FALSE);
}

static boolean_t
udev_device_is_ready(struct udev_device *dev)
{
#ifdef HAVE_LIBUDEV_UDEV_DEVICE_GET_IS_INITIALIZED
	return (udev_device_get_is_initialized(dev));
#else
	/* wait for DEVLINKS property to be initialized */
	return (udev_device_get_property_value(dev, "DEVLINKS") != NULL);
#endif
}

#else

int
zfs_device_get_devid(struct udev_device *dev, char *bufptr, size_t buflen)
{
	(void) dev, (void) bufptr, (void) buflen;
	return (ENODATA);
}

int
zfs_device_get_physical(struct udev_device *dev, char *bufptr, size_t buflen)
{
	(void) dev, (void) bufptr, (void) buflen;
	return (ENODATA);
}

#endif /* HAVE_LIBUDEV */

/*
 * Wait up to timeout_ms for udev to set up the device node. The device is
 * considered ready when libudev determines it has been initialized, all of
 * the device links have been verified to exist, and it has been allowed to
 * settle. At this point the device can be accessed reliably. Depending on
 * the complexity of the udev rules this process could take several seconds.
 */
int
zpool_label_disk_wait(const char *path, int timeout_ms)
{
#ifdef HAVE_LIBUDEV
	struct udev *udev;
	struct udev_device *dev = NULL;
	char nodepath[MAXPATHLEN];
	char *sysname = NULL;
	int ret = ENODEV;
	int settle_ms = 50;
	long sleep_ms = 10;
	hrtime_t start, settle;

	if ((udev = udev_new()) == NULL)
		return (ENXIO);

	start = gethrtime();
	settle = 0;

	do {
		if (sysname == NULL) {
			if (realpath(path, nodepath) != NULL) {
				sysname = strrchr(nodepath, '/') + 1;
			} else {
				(void) usleep(sleep_ms * MILLISEC);
				continue;
			}
		}

		dev = udev_device_new_from_subsystem_sysname(udev,
		    "block", sysname);
		if ((dev != NULL) && udev_device_is_ready(dev)) {
			struct udev_list_entry *links, *link = NULL;

			ret = 0;
			links = udev_device_get_devlinks_list_entry(dev);

			udev_list_entry_foreach(link, links) {
				struct stat64 statbuf;
				const char *name;

				/* a devlink is ready once it can be stat'd */
				name = udev_list_entry_get_name(link);
				errno = 0;
				if (stat64(name, &statbuf) == 0 && errno == 0)
					continue;

				settle = 0;
				ret = ENODEV;
				break;
			}

			if (ret == 0) {
				if (settle == 0) {
					settle = gethrtime();
				} else if (NSEC2MSEC(gethrtime() - settle) >=
				    settle_ms) {
					udev_device_unref(dev);
					break;
				}
			}
		}

		udev_device_unref(dev);
		(void) usleep(sleep_ms * MILLISEC);

	} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);

	udev_unref(udev);

	return (ret);

#else
	int settle_ms = 50;
	long sleep_ms = 10;
	hrtime_t start, settle;
	struct stat64 statbuf;

	start = gethrtime();
	settle = 0;

	do {
		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0)) {
			if (settle == 0)
				settle = gethrtime();
			else if (NSEC2MSEC(gethrtime() - settle) >= settle_ms)
				return (0);
		} else if (errno != ENOENT) {
			return (errno);
		}

		usleep(sleep_ms * MILLISEC);
	} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);

	return (ENODEV);
#endif /* HAVE_LIBUDEV */
}

/*
 * Simplified version of zpool_label_disk_wait() where we wait for a device
 * to appear using the default timeouts.
 */
int
zpool_disk_wait(const char *path)
{
	int timeout;
	timeout = zpool_getenv_int("ZPOOL_IMPORT_UDEV_TIMEOUT_MS",
	    DISK_LABEL_WAIT);

	return (zpool_label_disk_wait(path, timeout));
}
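
/*
 * Illustrative usage sketch (hypothetical caller and device name, not part
 * of this file): a tool that has just created a partition can block until
 * udev publishes the node before trying to open it:
 *
 *	if (zpool_label_disk_wait("/dev/sdb1", 30 * 1000) != 0)
 *		(void) fprintf(stderr, "device node did not appear\n");
 *
 * zpool_disk_wait() above derives its timeout from the
 * ZPOOL_IMPORT_UDEV_TIMEOUT_MS environment variable, while callers of
 * zpool_label_disk_wait() pass an explicit timeout in milliseconds.
 */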

/*
 * Encode the persistent device strings
 * used for the vdev disk label
 */
static int
encode_device_strings(const char *path, vdev_dev_strs_t *ds,
    boolean_t wholedisk)
{
#ifdef HAVE_LIBUDEV
	struct udev *udev;
	struct udev_device *dev = NULL;
	char nodepath[MAXPATHLEN];
	char *sysname;
	int ret = ENODEV;
	hrtime_t start;

	if ((udev = udev_new()) == NULL)
		return (ENXIO);

	/* resolve path to a runtime device node instance */
	if (realpath(path, nodepath) == NULL)
		goto no_dev;

	sysname = strrchr(nodepath, '/') + 1;

	/*
	 * Wait up to 3 seconds for udev to set up the device node context
	 */
	start = gethrtime();
	do {
		dev = udev_device_new_from_subsystem_sysname(udev, "block",
		    sysname);
		if (dev == NULL)
			goto no_dev;
		if (udev_device_is_ready(dev))
			break;	/* udev ready */

		udev_device_unref(dev);
		dev = NULL;

		if (NSEC2MSEC(gethrtime() - start) < 10)
			(void) sched_yield();	/* yield/busy wait up to 10ms */
		else
			(void) usleep(10 * MILLISEC);

	} while (NSEC2MSEC(gethrtime() - start) < (3 * MILLISEC));

	if (dev == NULL)
		goto no_dev;

	/*
	 * Only whole disks require extra device strings
	 */
	if (!wholedisk && !udev_mpath_whole_disk(dev))
		goto no_dev;

	ret = zfs_device_get_devid(dev, ds->vds_devid, sizeof (ds->vds_devid));
	if (ret != 0)
		goto no_dev_ref;

	/* physical location string (optional) */
	if (zfs_device_get_physical(dev, ds->vds_devphys,
	    sizeof (ds->vds_devphys)) != 0) {
		ds->vds_devphys[0] = '\0'; /* empty string --> not available */
	}

no_dev_ref:
	udev_device_unref(dev);
no_dev:
	udev_unref(udev);

	return (ret);

#else
	(void) path, (void) ds, (void) wholedisk;
	return (ENOENT);
#endif
}

/*
 * Rescan the enclosure sysfs path for turning on enclosure LEDs and store it
 * in the nvlist (if applicable). For example:
 *	vdev_enc_sysfs_path: '/sys/class/enclosure/11:0:1:0/SLOT 4'
 *
 * If an old path was in the nvlist, and the rescan cannot find a new path,
 * then keep the old path, since the disk may have been removed.
 *
 * path: The vdev path (value from ZPOOL_CONFIG_PATH)
 * key:  The nvlist_t name (like ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH)
 */
void
update_vdev_config_dev_sysfs_path(nvlist_t *nv, const char *path,
    const char *key)
{
	char *upath, *spath;
	const char *oldpath = NULL;

	(void) nvlist_lookup_string(nv, key, &oldpath);

	/* Add enclosure sysfs path (if disk is in an enclosure). */
	upath = zfs_get_underlying_path(path);
	spath = zfs_get_enclosure_sysfs_path(upath);

	if (spath) {
		(void) nvlist_add_string(nv, key, spath);
	} else {
		/*
		 * We couldn't dynamically scan the disk's enclosure sysfs
		 * path. This could be because the disk went away. If there's
		 * an old enclosure sysfs path in the nvlist, then keep using
		 * it.
		 */
		if (!oldpath) {
			(void) nvlist_remove_all(nv, key);
		}
	}

	free(upath);
	free(spath);
}

/*
 * This will get called for each leaf vdev.
 */
static int
sysfs_path_pool_vdev_iter_f(void *hdl_data, nvlist_t *nv, void *data)
{
	(void) hdl_data, (void) data;

	const char *path = NULL;
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		return (1);

	/* Rescan our enclosure sysfs path for this vdev */
	update_vdev_config_dev_sysfs_path(nv, path,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
	return (0);
}

/*
 * Given an nvlist for our pool (with vdev tree), iterate over all the
 * leaf vdevs and update their ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH.
 */
void
update_vdevs_config_dev_sysfs_path(nvlist_t *config)
{
	nvlist_t *nvroot = NULL;
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	for_each_vdev_in_nvlist(nvroot, sysfs_path_pool_vdev_iter_f, NULL);
}
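
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * after assembling a pool config during import, refresh every leaf vdev's
 * enclosure sysfs path before the config is used:
 *
 *	update_vdevs_config_dev_sysfs_path(config);
 *
 * Each leaf's ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH is then updated, kept as-is
 * (when the disk is missing but an old path exists), or removed.
 */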

/*
 * Update a leaf vdev's persistent device strings
 *
 * - only applies for a dedicated leaf vdev (aka whole disk)
 * - updated during pool create|add|attach|import
 * - used for device matching during auto-{online,expand,replace}
 * - stored in a leaf disk config label (i.e. alongside 'path' NVP)
 * - these strings are currently not used in kernel (i.e. for vdev_disk_open)
 *
 * single device node example:
 *	devid: 'scsi-MG03SCA300_350000494a8cb3d67-part1'
 *	phys_path: 'pci-0000:04:00.0-sas-0x50000394a8cb3d67-lun-0'
 *
 * multipath device node example:
 *	devid: 'dm-uuid-mpath-35000c5006304de3f'
 *
 * We also store the enclosure sysfs path for turning on enclosure LEDs
 * (if applicable):
 *	vdev_enc_sysfs_path: '/sys/class/enclosure/11:0:1:0/SLOT 4'
 */
void
update_vdev_config_dev_strs(nvlist_t *nv)
{
	vdev_dev_strs_t vds;
	const char *env, *type, *path;
	uint64_t wholedisk = 0;

	/*
	 * For the benefit of legacy ZFS implementations, allow
	 * for opting out of devid strings in the vdev label.
	 *
	 * example: env ZFS_VDEV_DEVID_OPT_OUT=YES zpool import dozer
	 *
	 * explanation:
	 * Older OpenZFS implementations had issues when attempting to
	 * display pool config VDEV names if a "devid" NVP value is
	 * present in the pool's config.
	 *
	 * For example, a pool that originated on the illumos platform would
	 * have a devid value in the config and "zpool status" would fail
	 * when listing the config.
	 *
	 * A pool can be stripped of any "devid" values on import or
	 * prevented from adding them on zpool create|add by setting
	 * ZFS_VDEV_DEVID_OPT_OUT.
	 */
	env = getenv("ZFS_VDEV_DEVID_OPT_OUT");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
		return;
	}

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    strcmp(type, VDEV_TYPE_DISK) != 0) {
		return;
	}
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		return;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	/*
	 * Update device string values in the config nvlist.
	 */
	if (encode_device_strings(path, &vds, (boolean_t)wholedisk) == 0) {
		(void) nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vds.vds_devid);
		if (vds.vds_devphys[0] != '\0') {
			(void) nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
			    vds.vds_devphys);
		}
		update_vdev_config_dev_sysfs_path(nv, path,
		    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
	} else {
		/* Clear out any stale entries. */
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
	}
}