/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright (c) 2016, Intel Corporation.
 */
/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
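/*
 * Illustrative example (hypothetical values, not taken from a real pool):
 * two device nodes that both carry a label for pool guid 123 and toplevel
 * vdev guid 456 at label txg 789 map to the same
 * (pool guid, toplevel vdev guid, label txg) tuple, so only one of the two
 * configs is retained for that toplevel vdev.
 */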
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <sys/vdev_impl.h>
#include <blkid/blkid.h>

#include "libzfs_impl.h"
/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	uint64_t		ne_order;
	uint64_t		ne_num_labels;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
#define	DEV_BYID_PATH	"/dev/disk/by-id/"

#ifdef HAVE_LIBUDEV

/*
 * Linux persistent device strings for vdev labels
 *
 * based on libudev for consistency with libudev disk add/remove events
 */
typedef struct vdev_dev_strs {
	char	vds_devid[128];
	char	vds_devphys[128];
} vdev_dev_strs_t;
/*
 * Obtain the persistent device id string (describes what)
 *
 * used by ZED vdev matching for auto-{online,expand,replace}
 */
int
zfs_device_get_devid(struct udev_device *dev, char *bufptr, size_t buflen)
{
	struct udev_list_entry *entry;
	const char *bus, *dm_uuid, *name;
	char devbyid[MAXPATHLEN];

	/* The bus based by-id path is preferred */
	bus = udev_device_get_property_value(dev, "ID_BUS");

	if (bus == NULL) {
		/*
		 * For multipath nodes use the persistent uuid based identifier
		 *
		 * Example: /dev/disk/by-id/dm-uuid-mpath-35000c5006304de3f
		 */
		dm_uuid = udev_device_get_property_value(dev, "DM_UUID");
		if (dm_uuid != NULL) {
			(void) snprintf(bufptr, buflen, "dm-uuid-%s", dm_uuid);
			return (0);
		}
		return (ENODATA);
	}

	/*
	 * locate the bus specific by-id link
	 */
	(void) snprintf(devbyid, sizeof (devbyid), "%s%s-", DEV_BYID_PATH, bus);
	entry = udev_device_get_devlinks_list_entry(dev);
	while (entry != NULL) {
		name = udev_list_entry_get_name(entry);
		if (strncmp(name, devbyid, strlen(devbyid)) == 0) {
			name += strlen(DEV_BYID_PATH);
			(void) strlcpy(bufptr, name, buflen);
			return (0);
		}
		entry = udev_list_entry_get_next(entry);
	}

	return (ENODATA);
}
/*
 * Obtain the persistent physical location string (describes where)
 *
 * used by ZED vdev matching for auto-{online,expand,replace}
 */
int
zfs_device_get_physical(struct udev_device *dev, char *bufptr, size_t buflen)
{
	const char *physpath = NULL;

	/*
	 * Normal disks use ID_PATH for their physical path.  Device mapper
	 * devices are virtual and don't have a physical path.  For them we
	 * use ID_VDEV instead, which is set up via the /etc/vdev_id.conf file.
	 * ID_VDEV provides a persistent path to a virtual device.  If you
	 * don't have vdev_id.conf set up, you cannot use multipath autoreplace.
	 */
	if (!((physpath = udev_device_get_property_value(dev, "ID_PATH")) &&
	    physpath[0]) &&
	    !((physpath = udev_device_get_property_value(dev, "ID_VDEV")) &&
	    physpath[0]))
		return (ENODATA);

	(void) strlcpy(bufptr, physpath, buflen);

	return (0);
}

static boolean_t
udev_is_mpath(struct udev_device *dev)
{
	return udev_device_get_property_value(dev, "DM_UUID") &&
	    udev_device_get_property_value(dev, "MPATH_SBIN_PATH");
}
/*
 * A disk is considered a multipath whole disk when:
 *	DEVNAME key value has "dm-"
 *	DM_NAME key value has "mpath" prefix
 *	DM_UUID key exists
 *	ID_PART_TABLE_TYPE key does not exist or is not gpt
 */
static boolean_t
udev_mpath_whole_disk(struct udev_device *dev)
{
	const char *devname, *type, *uuid;

	devname = udev_device_get_property_value(dev, "DEVNAME");
	type = udev_device_get_property_value(dev, "ID_PART_TABLE_TYPE");
	uuid = udev_device_get_property_value(dev, "DM_UUID");

	if ((devname != NULL && strncmp(devname, "/dev/dm-", 8) == 0) &&
	    ((type == NULL) || (strcmp(type, "gpt") != 0)) &&
	    (uuid != NULL)) {
		return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Check if a disk is effectively a multipath whole disk
 */
boolean_t
is_mpath_whole_disk(const char *path)
{
	struct udev *udev;
	struct udev_device *dev = NULL;
	char nodepath[MAXPATHLEN];
	char *sysname;
	boolean_t wholedisk = B_FALSE;

	if (realpath(path, nodepath) == NULL)
		return (B_FALSE);
	sysname = strrchr(nodepath, '/') + 1;
	if (strncmp(sysname, "dm-", 3) != 0)
		return (B_FALSE);
	if ((udev = udev_new()) == NULL)
		return (B_FALSE);
	if ((dev = udev_device_new_from_subsystem_sysname(udev, "block",
	    sysname)) == NULL) {
		udev_device_unref(dev);
		return (B_FALSE);
	}

	wholedisk = udev_mpath_whole_disk(dev);

	udev_device_unref(dev);

	return (wholedisk);
}
static int
udev_device_is_ready(struct udev_device *dev)
{
#ifdef HAVE_LIBUDEV_UDEV_DEVICE_GET_IS_INITIALIZED
	return (udev_device_get_is_initialized(dev));
#else
	/* wait for DEVLINKS property to be initialized */
	return (udev_device_get_property_value(dev, "DEVLINKS") != NULL);
#endif
}
/*
 * Wait up to timeout_ms for udev to set up the device node.  The device is
 * considered ready when libudev determines it has been initialized, all of
 * the device links have been verified to exist, and it has been allowed to
 * settle.  At this point the device can be accessed reliably.  Depending on
 * the complexity of the udev rules this process could take several seconds.
 */
int
zpool_label_disk_wait(char *path, int timeout_ms)
{
	struct udev *udev;
	struct udev_device *dev = NULL;
	char nodepath[MAXPATHLEN];
	char *sysname = NULL;
	int ret = ENODEV;
	int settle_ms = 50;
	long sleep_ms = 10;
	hrtime_t start, settle;

	if ((udev = udev_new()) == NULL)
		return (ENXIO);

	start = gethrtime();
	settle = 0;

	do {
		if (sysname == NULL) {
			if (realpath(path, nodepath) != NULL) {
				sysname = strrchr(nodepath, '/') + 1;
			} else {
				(void) usleep(sleep_ms * MILLISEC);
				continue;
			}
		}

		dev = udev_device_new_from_subsystem_sysname(udev,
		    "block", sysname);
		if ((dev != NULL) && udev_device_is_ready(dev)) {
			struct udev_list_entry *links, *link;

			ret = 0;
			links = udev_device_get_devlinks_list_entry(dev);

			udev_list_entry_foreach(link, links) {
				struct stat64 statbuf;
				const char *name;

				name = udev_list_entry_get_name(link);
				errno = 0;
				if (stat64(name, &statbuf) == 0 && errno == 0)
					continue;

				settle = 0;
				ret = ENODEV;
				break;
			}

			if (ret == 0) {
				if (settle == 0) {
					settle = gethrtime();
				} else if (NSEC2MSEC(gethrtime() - settle) >=
				    settle_ms) {
					udev_device_unref(dev);
					break;
				}
			}
		}

		udev_device_unref(dev);
		(void) usleep(sleep_ms * MILLISEC);

	} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);

	udev_unref(udev);

	return (ret);
}
/*
 * Encode the persistent devices strings
 * used for the vdev disk label
 */
static int
encode_device_strings(const char *path, vdev_dev_strs_t *ds,
    boolean_t wholedisk)
{
	struct udev *udev;
	struct udev_device *dev = NULL;
	char nodepath[MAXPATHLEN];
	char *sysname;
	int ret = ENODEV;
	hrtime_t start;

	if ((udev = udev_new()) == NULL)
		return (ENXIO);

	/* resolve path to a runtime device node instance */
	if (realpath(path, nodepath) == NULL)
		goto no_dev;

	sysname = strrchr(nodepath, '/') + 1;

	/*
	 * Wait up to 3 seconds for udev to set up the device node context
	 */
	start = gethrtime();
	do {
		dev = udev_device_new_from_subsystem_sysname(udev, "block",
		    sysname);
		if (dev == NULL)
			goto no_dev;
		if (udev_device_is_ready(dev))
			break;	/* udev ready */

		udev_device_unref(dev);
		dev = NULL;

		if (NSEC2MSEC(gethrtime() - start) < 10)
			(void) sched_yield();	/* yield/busy wait up to 10ms */
		else
			(void) usleep(10 * MILLISEC);

	} while (NSEC2MSEC(gethrtime() - start) < (3 * MILLISEC));

	if (dev == NULL)
		goto no_dev;

	/*
	 * Only whole disks require extra device strings
	 */
	if (!wholedisk && !udev_mpath_whole_disk(dev))
		goto no_dev;

	ret = zfs_device_get_devid(dev, ds->vds_devid, sizeof (ds->vds_devid));
	if (ret != 0)
		goto no_dev_ref;

	/* physical location string (optional) */
	if (zfs_device_get_physical(dev, ds->vds_devphys,
	    sizeof (ds->vds_devphys)) != 0) {
		ds->vds_devphys[0] = '\0'; /* empty string --> not available */
	}

no_dev_ref:
	udev_device_unref(dev);
no_dev:
	udev_unref(udev);

	return (ret);
}
/*
 * Update a leaf vdev's persistent device strings (Linux only)
 *
 * - only applies for a dedicated leaf vdev (aka whole disk)
 * - updated during pool create|add|attach|import
 * - used for device matching during auto-{online,expand,replace}
 * - stored in a leaf disk config label (i.e. alongside 'path' NVP)
 * - these strings are currently not used in kernel (i.e. for vdev_disk_open)
 *
 * single device node example:
 *	devid:		'scsi-MG03SCA300_350000494a8cb3d67-part1'
 *	phys_path:	'pci-0000:04:00.0-sas-0x50000394a8cb3d67-lun-0'
 *
 * multipath device node example:
 *	devid:		'dm-uuid-mpath-35000c5006304de3f'
 *
 * We also store the enclosure sysfs path for turning on enclosure LEDs
 * (if applicable):
 *	vdev_enc_sysfs_path: '/sys/class/enclosure/11:0:1:0/SLOT 4'
 */
void
update_vdev_config_dev_strs(nvlist_t *nv)
{
	vdev_dev_strs_t vds;
	char *env, *type, *path;
	char *upath, *spath;
	uint64_t wholedisk = 0;

	/*
	 * For the benefit of legacy ZFS implementations, allow
	 * for opting out of devid strings in the vdev label.
	 *
	 * example: env ZFS_VDEV_DEVID_OPT_OUT=YES zpool import dozer
	 *
	 * Older ZFS on Linux implementations had issues when attempting to
	 * display pool config VDEV names if a "devid" NVP value is present
	 * in the pool's config.
	 *
	 * For example, a pool that originated on illumos platform would
	 * have a devid value in the config and "zpool status" would fail
	 * when listing the config.
	 *
	 * A pool can be stripped of any "devid" values on import or
	 * prevented from adding them on zpool create|add by setting
	 * ZFS_VDEV_DEVID_OPT_OUT.
	 */
	env = getenv("ZFS_VDEV_DEVID_OPT_OUT");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
		return;
	}

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    strcmp(type, VDEV_TYPE_DISK) != 0) {
		return;
	}
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		return;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	/*
	 * Update device string values in config nvlist
	 */
	if (encode_device_strings(path, &vds, (boolean_t)wholedisk) == 0) {
		(void) nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vds.vds_devid);
		if (vds.vds_devphys[0] != '\0') {
			(void) nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
			    vds.vds_devphys);
		}

		/* Add enclosure sysfs path (if disk is in an enclosure) */
		upath = zfs_get_underlying_path(path);
		spath = zfs_get_enclosure_sysfs_path(upath);
		if (spath)
			nvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
			    spath);
		else
			nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);

		free(upath);
		free(spath);
	} else {
		/* clear out any stale entries */
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
	}
}
#else /* HAVE_LIBUDEV */

boolean_t
is_mpath_whole_disk(const char *path)
{
	return (B_FALSE);
}

/*
 * Wait up to timeout_ms for udev to set up the device node.  The device is
 * considered ready when the provided path has been verified to exist and
 * it has been allowed to settle.  At this point the device can be accessed
 * reliably.  Depending on the complexity of the udev rules this process
 * could take several seconds.
 */
int
zpool_label_disk_wait(char *path, int timeout_ms)
{
	int settle_ms = 50;
	long sleep_ms = 10;
	hrtime_t start, settle;
	struct stat64 statbuf;

	start = gethrtime();
	settle = 0;

	do {
		errno = 0;
		if ((stat64(path, &statbuf) == 0) && (errno == 0)) {
			if (settle == 0)
				settle = gethrtime();
			else if (NSEC2MSEC(gethrtime() - settle) >= settle_ms)
				return (0);
		} else if (errno != ENOENT) {
			return (errno);
		}

		usleep(sleep_ms * MILLISEC);
	} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);

	return (ENODEV);
}

void
update_vdev_config_dev_strs(nvlist_t *nv)
{
}

#endif /* HAVE_LIBUDEV */
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping partitions or multiple paths to the
	 * same disk.  In this case we prefer to use the path name which
	 * matches the ZPOOL_CONFIG_PATH.  If no matching entry is found we
	 * use the lowest order device which corresponds to the first match
	 * while traversing the ZPOOL_IMPORT_PATH search path.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			if (path == NULL) {
				best = ne;
				break;
			}

			if ((strlen(path) == strlen(ne->ne_name)) &&
			    strncmp(path, ne->ne_name, strlen(path)) == 0) {
				best = ne;
				break;
			}

			if (best == NULL) {
				best = ne;
				continue;
			}

			/* Prefer paths with more vdev labels. */
			if (ne->ne_num_labels > best->ne_num_labels) {
				best = ne;
				continue;
			}

			/* Prefer paths earlier in the search order. */
			if (ne->ne_num_labels == best->ne_num_labels &&
			    ne->ne_order < best->ne_order) {
				best = ne;
				continue;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	/* Linux only - update ZPOOL_CONFIG_DEVID and ZPOOL_CONFIG_PHYS_PATH */
	update_vdev_config_dev_strs(nv);

	return (0);
}
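/*
 * Illustrative example of the selection rule above (hypothetical entries):
 * given two name_entry_t records for the same guid, one discovered under
 * /dev/disk/by-id with four valid labels and one discovered under /dev with
 * two, the by-id entry wins on label count; if both had four labels, the
 * entry with the lower ne_order (earlier in the search path) would win.
 */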
652 * Add the given configuration to the list of known devices.
655 add_config(libzfs_handle_t
*hdl
, pool_list_t
*pl
, const char *path
,
656 int order
, int num_labels
, nvlist_t
*config
)
658 uint64_t pool_guid
, vdev_guid
, top_guid
, txg
, state
;
665 * If this is a hot spare not currently in use or level 2 cache
666 * device, add it to the list of names to translate, but don't do
669 if (nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_STATE
,
671 (state
== POOL_STATE_SPARE
|| state
== POOL_STATE_L2CACHE
) &&
672 nvlist_lookup_uint64(config
, ZPOOL_CONFIG_GUID
, &vdev_guid
) == 0) {
673 if ((ne
= zfs_alloc(hdl
, sizeof (name_entry_t
))) == NULL
) {
678 if ((ne
->ne_name
= zfs_strdup(hdl
, path
)) == NULL
) {
683 ne
->ne_guid
= vdev_guid
;
684 ne
->ne_order
= order
;
685 ne
->ne_num_labels
= num_labels
;
686 ne
->ne_next
= pl
->names
;
693 * If we have a valid config but cannot read any of these fields, then
694 * it means we have a half-initialized label. In vdev_label_init()
695 * we write a label with txg == 0 so that we can identify the device
696 * in case the user refers to the same disk later on. If we fail to
697 * create the pool, we'll be left with a label in this state
698 * which should not be considered part of a valid pool.
700 if (nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
702 nvlist_lookup_uint64(config
, ZPOOL_CONFIG_GUID
,
704 nvlist_lookup_uint64(config
, ZPOOL_CONFIG_TOP_GUID
,
706 nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_TXG
,
707 &txg
) != 0 || txg
== 0) {
713 * First, see if we know about this pool. If not, then add it to the
714 * list of known pools.
716 for (pe
= pl
->pools
; pe
!= NULL
; pe
= pe
->pe_next
) {
717 if (pe
->pe_guid
== pool_guid
)
722 if ((pe
= zfs_alloc(hdl
, sizeof (pool_entry_t
))) == NULL
) {
726 pe
->pe_guid
= pool_guid
;
727 pe
->pe_next
= pl
->pools
;
 * Second, see if we know about this toplevel vdev.  Add it if it's not
 * there already.
735 for (ve
= pe
->pe_vdevs
; ve
!= NULL
; ve
= ve
->ve_next
) {
736 if (ve
->ve_guid
== top_guid
)
741 if ((ve
= zfs_alloc(hdl
, sizeof (vdev_entry_t
))) == NULL
) {
745 ve
->ve_guid
= top_guid
;
746 ve
->ve_next
= pe
->pe_vdevs
;
751 * Third, see if we have a config with a matching transaction group. If
752 * so, then we do nothing. Otherwise, add it to the list of known
755 for (ce
= ve
->ve_configs
; ce
!= NULL
; ce
= ce
->ce_next
) {
756 if (ce
->ce_txg
== txg
)
761 if ((ce
= zfs_alloc(hdl
, sizeof (config_entry_t
))) == NULL
) {
766 ce
->ce_config
= config
;
767 ce
->ce_next
= ve
->ve_configs
;
774 * At this point we've successfully added our config to the list of
775 * known configs. The last thing to do is add the vdev guid -> path
776 * mappings so that we can fix up the configuration as necessary before
779 if ((ne
= zfs_alloc(hdl
, sizeof (name_entry_t
))) == NULL
)
782 if ((ne
->ne_name
= zfs_strdup(hdl
, path
)) == NULL
) {
787 ne
->ne_guid
= vdev_guid
;
788 ne
->ne_order
= order
;
789 ne
->ne_num_labels
= num_labels
;
790 ne
->ne_next
= pl
->names
;
797 * Returns true if the named pool matches the given GUID.
800 pool_active(libzfs_handle_t
*hdl
, const char *name
, uint64_t guid
,
806 if (zpool_open_silent(hdl
, name
, &zhp
) != 0)
814 verify(nvlist_lookup_uint64(zhp
->zpool_config
, ZPOOL_CONFIG_POOL_GUID
,
819 *isactive
= (theguid
== guid
);
824 refresh_config(libzfs_handle_t
*hdl
, nvlist_t
*config
)
827 zfs_cmd_t zc
= {"\0"};
828 int err
, dstbuf_size
;
830 if (zcmd_write_conf_nvlist(hdl
, &zc
, config
) != 0)
833 dstbuf_size
= MAX(CONFIG_BUF_MINSIZE
, zc
.zc_nvlist_conf_size
* 4);
835 if (zcmd_alloc_dst_nvlist(hdl
, &zc
, dstbuf_size
) != 0) {
836 zcmd_free_nvlists(&zc
);
840 while ((err
= ioctl(hdl
->libzfs_fd
, ZFS_IOC_POOL_TRYIMPORT
,
841 &zc
)) != 0 && errno
== ENOMEM
) {
842 if (zcmd_expand_dst_nvlist(hdl
, &zc
) != 0) {
843 zcmd_free_nvlists(&zc
);
849 zcmd_free_nvlists(&zc
);
853 if (zcmd_read_dst_nvlist(hdl
, &zc
, &nvl
) != 0) {
854 zcmd_free_nvlists(&zc
);
858 zcmd_free_nvlists(&zc
);
863 * Determine if the vdev id is a hole in the namespace.
866 vdev_is_hole(uint64_t *hole_array
, uint_t holes
, uint_t id
)
870 for (c
= 0; c
< holes
; c
++) {
872 /* Top-level is a hole */
873 if (hole_array
[c
] == id
)
880 * Convert our list of pools into the definitive set of configurations. We
881 * start by picking the best config for each toplevel vdev. Once that's done,
882 * we assemble the toplevel vdevs into a full config for the pool. We make a
883 * pass to fix up any incorrect paths, and then add it to the main list to
884 * return to the user.
887 get_configs(libzfs_handle_t
*hdl
, pool_list_t
*pl
, boolean_t active_ok
)
892 nvlist_t
*ret
= NULL
, *config
= NULL
, *tmp
= NULL
, *nvtop
, *nvroot
;
893 nvlist_t
**spares
, **l2cache
;
894 uint_t i
, nspares
, nl2cache
;
895 boolean_t config_seen
;
897 char *name
, *hostname
= NULL
;
900 nvlist_t
**child
= NULL
;
902 uint64_t *hole_array
, max_id
;
907 boolean_t valid_top_config
= B_FALSE
;
909 if (nvlist_alloc(&ret
, 0, 0) != 0)
912 for (pe
= pl
->pools
; pe
!= NULL
; pe
= pe
->pe_next
) {
913 uint64_t id
, max_txg
= 0;
915 if (nvlist_alloc(&config
, NV_UNIQUE_NAME
, 0) != 0)
917 config_seen
= B_FALSE
;
920 * Iterate over all toplevel vdevs. Grab the pool configuration
921 * from the first one we find, and then go through the rest and
922 * add them as necessary to the 'vdevs' member of the config.
924 for (ve
= pe
->pe_vdevs
; ve
!= NULL
; ve
= ve
->ve_next
) {
927 * Determine the best configuration for this vdev by
928 * selecting the config with the latest transaction
932 for (ce
= ve
->ve_configs
; ce
!= NULL
;
935 if (ce
->ce_txg
> best_txg
) {
937 best_txg
= ce
->ce_txg
;
942 * We rely on the fact that the max txg for the
943 * pool will contain the most up-to-date information
944 * about the valid top-levels in the vdev namespace.
946 if (best_txg
> max_txg
) {
947 (void) nvlist_remove(config
,
948 ZPOOL_CONFIG_VDEV_CHILDREN
,
950 (void) nvlist_remove(config
,
951 ZPOOL_CONFIG_HOLE_ARRAY
,
952 DATA_TYPE_UINT64_ARRAY
);
958 valid_top_config
= B_FALSE
;
960 if (nvlist_lookup_uint64(tmp
,
961 ZPOOL_CONFIG_VDEV_CHILDREN
, &max_id
) == 0) {
962 verify(nvlist_add_uint64(config
,
963 ZPOOL_CONFIG_VDEV_CHILDREN
,
965 valid_top_config
= B_TRUE
;
968 if (nvlist_lookup_uint64_array(tmp
,
969 ZPOOL_CONFIG_HOLE_ARRAY
, &hole_array
,
971 verify(nvlist_add_uint64_array(config
,
972 ZPOOL_CONFIG_HOLE_ARRAY
,
973 hole_array
, holes
) == 0);
979 * Copy the relevant pieces of data to the pool
985 * comment (if available)
987 * hostid (if available)
988 * hostname (if available)
990 uint64_t state
, version
;
991 char *comment
= NULL
;
993 version
= fnvlist_lookup_uint64(tmp
,
994 ZPOOL_CONFIG_VERSION
);
995 fnvlist_add_uint64(config
,
996 ZPOOL_CONFIG_VERSION
, version
);
997 guid
= fnvlist_lookup_uint64(tmp
,
998 ZPOOL_CONFIG_POOL_GUID
);
999 fnvlist_add_uint64(config
,
1000 ZPOOL_CONFIG_POOL_GUID
, guid
);
1001 name
= fnvlist_lookup_string(tmp
,
1002 ZPOOL_CONFIG_POOL_NAME
);
1003 fnvlist_add_string(config
,
1004 ZPOOL_CONFIG_POOL_NAME
, name
);
1006 if (nvlist_lookup_string(tmp
,
1007 ZPOOL_CONFIG_COMMENT
, &comment
) == 0)
1008 fnvlist_add_string(config
,
1009 ZPOOL_CONFIG_COMMENT
, comment
);
1011 state
= fnvlist_lookup_uint64(tmp
,
1012 ZPOOL_CONFIG_POOL_STATE
);
1013 fnvlist_add_uint64(config
,
1014 ZPOOL_CONFIG_POOL_STATE
, state
);
1017 if (nvlist_lookup_uint64(tmp
,
1018 ZPOOL_CONFIG_HOSTID
, &hostid
) == 0) {
1019 fnvlist_add_uint64(config
,
1020 ZPOOL_CONFIG_HOSTID
, hostid
);
1021 hostname
= fnvlist_lookup_string(tmp
,
1022 ZPOOL_CONFIG_HOSTNAME
);
1023 fnvlist_add_string(config
,
1024 ZPOOL_CONFIG_HOSTNAME
, hostname
);
1027 config_seen
= B_TRUE
;
1031 * Add this top-level vdev to the child array.
1033 verify(nvlist_lookup_nvlist(tmp
,
1034 ZPOOL_CONFIG_VDEV_TREE
, &nvtop
) == 0);
1035 verify(nvlist_lookup_uint64(nvtop
, ZPOOL_CONFIG_ID
,
1038 if (id
>= children
) {
1039 nvlist_t
**newchild
;
1041 newchild
= zfs_alloc(hdl
, (id
+ 1) *
1042 sizeof (nvlist_t
*));
1043 if (newchild
== NULL
)
1046 for (c
= 0; c
< children
; c
++)
1047 newchild
[c
] = child
[c
];
1053 if (nvlist_dup(nvtop
, &child
[id
], 0) != 0)
1059 * If we have information about all the top-levels then
1060 * clean up the nvlist which we've constructed. This
1061 * means removing any extraneous devices that are
1062 * beyond the valid range or adding devices to the end
1063 * of our array which appear to be missing.
1065 if (valid_top_config
) {
1066 if (max_id
< children
) {
1067 for (c
= max_id
; c
< children
; c
++)
1068 nvlist_free(child
[c
]);
1070 } else if (max_id
> children
) {
1071 nvlist_t
**newchild
;
1073 newchild
= zfs_alloc(hdl
, (max_id
) *
1074 sizeof (nvlist_t
*));
1075 if (newchild
== NULL
)
1078 for (c
= 0; c
< children
; c
++)
1079 newchild
[c
] = child
[c
];
1087 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
1091 * The vdev namespace may contain holes as a result of
1092 * device removal. We must add them back into the vdev
1093 * tree before we process any missing devices.
1096 ASSERT(valid_top_config
);
1098 for (c
= 0; c
< children
; c
++) {
1101 if (child
[c
] != NULL
||
1102 !vdev_is_hole(hole_array
, holes
, c
))
1105 if (nvlist_alloc(&holey
, NV_UNIQUE_NAME
,
1110 * Holes in the namespace are treated as
1111 * "hole" top-level vdevs and have a
1112 * special flag set on them.
1114 if (nvlist_add_string(holey
,
1116 VDEV_TYPE_HOLE
) != 0 ||
1117 nvlist_add_uint64(holey
,
1118 ZPOOL_CONFIG_ID
, c
) != 0 ||
1119 nvlist_add_uint64(holey
,
1120 ZPOOL_CONFIG_GUID
, 0ULL) != 0) {
1129 * Look for any missing top-level vdevs. If this is the case,
1130 * create a faked up 'missing' vdev as a placeholder. We cannot
1131 * simply compress the child array, because the kernel performs
1132 * certain checks to make sure the vdev IDs match their location
1133 * in the configuration.
1135 for (c
= 0; c
< children
; c
++) {
1136 if (child
[c
] == NULL
) {
1138 if (nvlist_alloc(&missing
, NV_UNIQUE_NAME
,
1141 if (nvlist_add_string(missing
,
1143 VDEV_TYPE_MISSING
) != 0 ||
1144 nvlist_add_uint64(missing
,
1145 ZPOOL_CONFIG_ID
, c
) != 0 ||
1146 nvlist_add_uint64(missing
,
1147 ZPOOL_CONFIG_GUID
, 0ULL) != 0) {
1148 nvlist_free(missing
);
1156 * Put all of this pool's top-level vdevs into a root vdev.
1158 if (nvlist_alloc(&nvroot
, NV_UNIQUE_NAME
, 0) != 0)
1160 if (nvlist_add_string(nvroot
, ZPOOL_CONFIG_TYPE
,
1161 VDEV_TYPE_ROOT
) != 0 ||
1162 nvlist_add_uint64(nvroot
, ZPOOL_CONFIG_ID
, 0ULL) != 0 ||
1163 nvlist_add_uint64(nvroot
, ZPOOL_CONFIG_GUID
, guid
) != 0 ||
1164 nvlist_add_nvlist_array(nvroot
, ZPOOL_CONFIG_CHILDREN
,
1165 child
, children
) != 0) {
1166 nvlist_free(nvroot
);
1170 for (c
= 0; c
< children
; c
++)
1171 nvlist_free(child
[c
]);
1177 * Go through and fix up any paths and/or devids based on our
1178 * known list of vdev GUID -> path mappings.
1180 if (fix_paths(nvroot
, pl
->names
) != 0) {
1181 nvlist_free(nvroot
);
1186 * Add the root vdev to this pool's configuration.
1188 if (nvlist_add_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
,
1190 nvlist_free(nvroot
);
1193 nvlist_free(nvroot
);
1196 * zdb uses this path to report on active pools that were
1197 * imported or created using -R.
1203 * Determine if this pool is currently active, in which case we
1204 * can't actually import it.
1206 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_POOL_NAME
,
1208 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
1211 if (pool_active(hdl
, name
, guid
, &isactive
) != 0)
1215 nvlist_free(config
);
1220 if ((nvl
= refresh_config(hdl
, config
)) == NULL
) {
1221 nvlist_free(config
);
1226 nvlist_free(config
);
1230 * Go through and update the paths for spares, now that we have
1233 verify(nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
,
1235 if (nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_SPARES
,
1236 &spares
, &nspares
) == 0) {
1237 for (i
= 0; i
< nspares
; i
++) {
1238 if (fix_paths(spares
[i
], pl
->names
) != 0)
1244 * Update the paths for l2cache devices.
1246 if (nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_L2CACHE
,
1247 &l2cache
, &nl2cache
) == 0) {
1248 for (i
= 0; i
< nl2cache
; i
++) {
1249 if (fix_paths(l2cache
[i
], pl
->names
) != 0)
1255 * Restore the original information read from the actual label.
1257 (void) nvlist_remove(config
, ZPOOL_CONFIG_HOSTID
,
1259 (void) nvlist_remove(config
, ZPOOL_CONFIG_HOSTNAME
,
1262 verify(nvlist_add_uint64(config
, ZPOOL_CONFIG_HOSTID
,
1264 verify(nvlist_add_string(config
, ZPOOL_CONFIG_HOSTNAME
,
1270 * Add this pool to the list of configs.
1272 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_POOL_NAME
,
1274 if (nvlist_add_nvlist(ret
, name
, config
) != 0)
1277 nvlist_free(config
);
1284 (void) no_memory(hdl
);
1286 nvlist_free(config
);
1288 for (c
= 0; c
< children
; c
++)
1289 nvlist_free(child
[c
]);
/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
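/*
 * Worked example (assuming the usual VDEV_LABELS == 4 and a 256 KB
 * vdev_label_t): for a device whose aligned size is 1 TB, labels 0 and 1
 * are read from offsets 0 and 256 KB, while labels 2 and 3 are read from
 * size - 512 KB and size - 256 KB, i.e. the end of the device.
 */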
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.  The number of valid
 * labels found will be returned in num_labels when non-NULL.
 */
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
	struct stat64 statbuf;
	int l, count = 0;
	vdev_label_t *label;
	nvlist_t *expected_config = NULL;
	uint64_t expected_guid = 0, size;
	int error;

	*config = NULL;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label));
	if (error)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, txg;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(*config);
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		if (expected_guid) {
			if (expected_guid == guid)
				count++;

			nvlist_free(*config);
		} else {
			expected_config = *config;
			expected_guid = guid;
			count++;
		}
	}

	if (num_labels != NULL)
		*num_labels = count;

	free(label);
	*config = expected_config;

	return (0);
}
1382 typedef struct rdsk_node
{
1383 char *rn_name
; /* Full path to device */
1384 int rn_order
; /* Preferred order (low to high) */
1385 int rn_num_labels
; /* Number of valid labels */
1386 uint64_t rn_vdev_guid
; /* Expected vdev guid when set */
1387 libzfs_handle_t
*rn_hdl
;
1388 nvlist_t
*rn_config
; /* Label config */
1392 boolean_t rn_labelpaths
;
1396 * Sorted by vdev guid and full path to allow for multiple entries with
1397 * the same full path name. This is required because it's possible to
1398 * have multiple block devices with labels that refer to the same
1399 * ZPOOL_CONFIG_PATH yet have different vdev guids. In this case both
1400 * entries need to be added to the cache. Scenarios where this can occur
1401 * include overwritten pool labels, devices which are visible from multiple
1402 * hosts and multipath devices.
1405 slice_cache_compare(const void *arg1
, const void *arg2
)
1407 const char *nm1
= ((rdsk_node_t
*)arg1
)->rn_name
;
1408 const char *nm2
= ((rdsk_node_t
*)arg2
)->rn_name
;
1409 uint64_t guid1
= ((rdsk_node_t
*)arg1
)->rn_vdev_guid
;
1410 uint64_t guid2
= ((rdsk_node_t
*)arg2
)->rn_vdev_guid
;
1413 rv
= AVL_CMP(guid1
, guid2
);
1417 return (AVL_ISIGN(strcmp(nm1
, nm2
)));
1421 is_watchdog_dev(char *dev
)
1423 /* For 'watchdog' dev */
1424 if (strcmp(dev
, "watchdog") == 0)
1427 /* For 'watchdog<digit><whatever> */
1428 if (strstr(dev
, "watchdog") == dev
&& isdigit(dev
[8]))
1435 label_paths_impl(libzfs_handle_t
*hdl
, nvlist_t
*nvroot
, uint64_t pool_guid
,
1436 uint64_t vdev_guid
, char **path
, char **devid
)
1444 if (nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_CHILDREN
,
1445 &child
, &children
) == 0) {
1446 for (c
= 0; c
< children
; c
++) {
1447 error
= label_paths_impl(hdl
, child
[c
],
1448 pool_guid
, vdev_guid
, path
, devid
);
1458 error
= nvlist_lookup_uint64(nvroot
, ZPOOL_CONFIG_GUID
, &guid
);
1459 if ((error
!= 0) || (guid
!= vdev_guid
))
1462 error
= nvlist_lookup_string(nvroot
, ZPOOL_CONFIG_PATH
, &val
);
1466 error
= nvlist_lookup_string(nvroot
, ZPOOL_CONFIG_DEVID
, &val
);
1474 * Given a disk label fetch the ZPOOL_CONFIG_PATH and ZPOOL_CONFIG_DEVID
1475 * and store these strings as config_path and devid_path respectively.
1476 * The returned pointers are only valid as long as label remains valid.
1479 label_paths(libzfs_handle_t
*hdl
, nvlist_t
*label
, char **path
, char **devid
)
1488 if (nvlist_lookup_nvlist(label
, ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) ||
1489 nvlist_lookup_uint64(label
, ZPOOL_CONFIG_POOL_GUID
, &pool_guid
) ||
1490 nvlist_lookup_uint64(label
, ZPOOL_CONFIG_GUID
, &vdev_guid
))
1493 return (label_paths_impl(hdl
, nvroot
, pool_guid
, vdev_guid
, path
,
1498 zpool_open_func(void *arg
)
1500 rdsk_node_t
*rn
= arg
;
1501 libzfs_handle_t
*hdl
= rn
->rn_hdl
;
1502 struct stat64 statbuf
;
1504 char *bname
, *dupname
;
1505 uint64_t vdev_guid
= 0;
 * Skip devices with well known prefixes, as there can be side effects
 * when opening these devices which need to be avoided.
1514 * hpet - High Precision Event Timer
1515 * watchdog - Watchdog must be closed in a special way.
1517 dupname
= zfs_strdup(hdl
, rn
->rn_name
);
1518 bname
= basename(dupname
);
1519 error
= ((strcmp(bname
, "hpet") == 0) || is_watchdog_dev(bname
));
1525 * Ignore failed stats. We only want regular files and block devices.
1527 if (stat64(rn
->rn_name
, &statbuf
) != 0 ||
1528 (!S_ISREG(statbuf
.st_mode
) && !S_ISBLK(statbuf
.st_mode
)))
1532 * Preferentially open using O_DIRECT to bypass the block device
1533 * cache which may be stale for multipath devices. An EINVAL errno
1534 * indicates O_DIRECT is unsupported so fallback to just O_RDONLY.
1536 fd
= open(rn
->rn_name
, O_RDONLY
| O_DIRECT
);
1537 if ((fd
< 0) && (errno
== EINVAL
))
1538 fd
= open(rn
->rn_name
, O_RDONLY
);
1544 * This file is too small to hold a zpool
1546 if (S_ISREG(statbuf
.st_mode
) && statbuf
.st_size
< SPA_MINDEVSIZE
) {
1551 error
= zpool_read_label(fd
, &config
, &num_labels
);
1557 if (num_labels
== 0) {
1559 nvlist_free(config
);
1564 * Check that the vdev is for the expected guid. Additional entries
1565 * are speculatively added based on the paths stored in the labels.
1566 * Entries with valid paths but incorrect guids must be removed.
1568 error
= nvlist_lookup_uint64(config
, ZPOOL_CONFIG_GUID
, &vdev_guid
);
1569 if (error
|| (rn
->rn_vdev_guid
&& rn
->rn_vdev_guid
!= vdev_guid
)) {
1571 nvlist_free(config
);
1577 rn
->rn_config
= config
;
1578 rn
->rn_num_labels
= num_labels
;
1581 * Add additional entries for paths described by this label.
1583 if (rn
->rn_labelpaths
) {
1590 if (label_paths(rn
->rn_hdl
, rn
->rn_config
, &path
, &devid
))
1594 * Allow devlinks to stabilize so all paths are available.
1596 zpool_label_disk_wait(rn
->rn_name
, DISK_LABEL_WAIT
);
1599 slice
= zfs_alloc(hdl
, sizeof (rdsk_node_t
));
1600 slice
->rn_name
= zfs_strdup(hdl
, path
);
1601 slice
->rn_vdev_guid
= vdev_guid
;
1602 slice
->rn_avl
= rn
->rn_avl
;
1603 slice
->rn_hdl
= hdl
;
1604 slice
->rn_order
= IMPORT_ORDER_PREFERRED_1
;
1605 slice
->rn_labelpaths
= B_FALSE
;
1606 mutex_enter(rn
->rn_lock
);
1607 if (avl_find(rn
->rn_avl
, slice
, &where
)) {
1608 mutex_exit(rn
->rn_lock
);
1609 free(slice
->rn_name
);
1612 avl_insert(rn
->rn_avl
, slice
, where
);
1613 mutex_exit(rn
->rn_lock
);
1614 zpool_open_func(slice
);
1618 if (devid
!= NULL
) {
1619 slice
= zfs_alloc(hdl
, sizeof (rdsk_node_t
));
1620 error
= asprintf(&slice
->rn_name
, "%s%s",
1621 DEV_BYID_PATH
, devid
);
1627 slice
->rn_vdev_guid
= vdev_guid
;
1628 slice
->rn_avl
= rn
->rn_avl
;
1629 slice
->rn_hdl
= hdl
;
1630 slice
->rn_order
= IMPORT_ORDER_PREFERRED_2
;
1631 slice
->rn_labelpaths
= B_FALSE
;
1632 mutex_enter(rn
->rn_lock
);
1633 if (avl_find(rn
->rn_avl
, slice
, &where
)) {
1634 mutex_exit(rn
->rn_lock
);
1635 free(slice
->rn_name
);
1638 avl_insert(rn
->rn_avl
, slice
, where
);
1639 mutex_exit(rn
->rn_lock
);
1640 zpool_open_func(slice
);
1647 * Given a file descriptor, clear (zero) the label information. This function
1648 * is used in the appliance stack as part of the ZFS sysevent module and
1649 * to implement the "zpool labelclear" command.
1652 zpool_clear_label(int fd
)
1654 struct stat64 statbuf
;
1656 vdev_label_t
*label
;
1659 if (fstat64_blk(fd
, &statbuf
) == -1)
1661 size
= P2ALIGN_TYPED(statbuf
.st_size
, sizeof (vdev_label_t
), uint64_t);
1663 if ((label
= calloc(sizeof (vdev_label_t
), 1)) == NULL
)
1666 for (l
= 0; l
< VDEV_LABELS
; l
++) {
1667 if (pwrite64(fd
, label
, sizeof (vdev_label_t
),
1668 label_offset(size
, l
)) != sizeof (vdev_label_t
)) {
1679 * Scan a list of directories for zfs devices.
1682 zpool_find_import_scan(libzfs_handle_t
*hdl
, kmutex_t
*lock
,
1683 avl_tree_t
**slice_cache
, char **dir
, int dirs
)
1690 *slice_cache
= NULL
;
1691 cache
= zfs_alloc(hdl
, sizeof (avl_tree_t
));
1692 avl_create(cache
, slice_cache_compare
, sizeof (rdsk_node_t
),
1693 offsetof(rdsk_node_t
, rn_node
));
1695 for (i
= 0; i
< dirs
; i
++) {
1696 char path
[MAXPATHLEN
];
1697 struct dirent64
*dp
;
1700 if (realpath(dir
[i
], path
) == NULL
) {
1702 if (error
== ENOENT
)
1705 zfs_error_aux(hdl
, strerror(error
));
1706 (void) zfs_error_fmt(hdl
, EZFS_BADPATH
, dgettext(
1707 TEXT_DOMAIN
, "cannot resolve path '%s'"), dir
[i
]);
1711 dirp
= opendir(path
);
1714 zfs_error_aux(hdl
, strerror(error
));
1715 (void) zfs_error_fmt(hdl
, EZFS_BADPATH
,
1716 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), path
);
1720 while ((dp
= readdir64(dirp
)) != NULL
) {
1721 const char *name
= dp
->d_name
;
1722 if (name
[0] == '.' &&
1723 (name
[1] == 0 || (name
[1] == '.' && name
[2] == 0)))
1726 slice
= zfs_alloc(hdl
, sizeof (rdsk_node_t
));
1727 error
= asprintf(&slice
->rn_name
, "%s/%s", path
, name
);
1732 slice
->rn_vdev_guid
= 0;
1733 slice
->rn_lock
= lock
;
1734 slice
->rn_avl
= cache
;
1735 slice
->rn_hdl
= hdl
;
1736 slice
->rn_order
= i
+ IMPORT_ORDER_SCAN_OFFSET
;
1737 slice
->rn_labelpaths
= B_FALSE
;
1739 avl_add(cache
, slice
);
1743 (void) closedir(dirp
);
1746 *slice_cache
= cache
;
1751 while ((slice
= avl_destroy_nodes(cache
, &cookie
)) != NULL
) {
1752 free(slice
->rn_name
);
1761 * Use libblkid to quickly enumerate all known zfs devices.
1764 zpool_find_import_blkid(libzfs_handle_t
*hdl
, kmutex_t
*lock
,
1765 avl_tree_t
**slice_cache
)
1769 blkid_dev_iterate iter
;
1774 *slice_cache
= NULL
;
1776 error
= blkid_get_cache(&cache
, NULL
);
1780 error
= blkid_probe_all_new(cache
);
1782 blkid_put_cache(cache
);
1786 iter
= blkid_dev_iterate_begin(cache
);
1788 blkid_put_cache(cache
);
1792 error
= blkid_dev_set_search(iter
, "TYPE", "zfs_member");
1794 blkid_dev_iterate_end(iter
);
1795 blkid_put_cache(cache
);
1799 *slice_cache
= zfs_alloc(hdl
, sizeof (avl_tree_t
));
1800 avl_create(*slice_cache
, slice_cache_compare
, sizeof (rdsk_node_t
),
1801 offsetof(rdsk_node_t
, rn_node
));
1803 while (blkid_dev_next(iter
, &dev
) == 0) {
1804 slice
= zfs_alloc(hdl
, sizeof (rdsk_node_t
));
1805 slice
->rn_name
= zfs_strdup(hdl
, blkid_dev_devname(dev
));
1806 slice
->rn_vdev_guid
= 0;
1807 slice
->rn_lock
= lock
;
1808 slice
->rn_avl
= *slice_cache
;
1809 slice
->rn_hdl
= hdl
;
1810 slice
->rn_labelpaths
= B_TRUE
;
1812 error
= zfs_path_order(slice
->rn_name
, &slice
->rn_order
);
1814 slice
->rn_order
+= IMPORT_ORDER_SCAN_OFFSET
;
1816 slice
->rn_order
= IMPORT_ORDER_DEFAULT
;
1819 if (avl_find(*slice_cache
, slice
, &where
)) {
1820 free(slice
->rn_name
);
1823 avl_insert(*slice_cache
, slice
, where
);
1828 blkid_dev_iterate_end(iter
);
1829 blkid_put_cache(cache
);
char *
zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
	"/dev/disk/by-vdev",	/* Custom rules, use first if they exist */
	"/dev/mapper",		/* Use multipath devices before components */
	"/dev/disk/by-partlabel", /* Single unique entry set by user */
	"/dev/disk/by-partuuid", /* Generated partition uuid */
	"/dev/disk/by-label",	/* Custom persistent labels */
	"/dev/disk/by-uuid",	/* Single unique entry and persistent */
	"/dev/disk/by-id",	/* May be multiple entries and persistent */
	"/dev/disk/by-path",	/* Encodes physical location and persistent */
	"/dev"			/* UNSAFE device names will change */
};
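/*
 * Note (assumption, not original source text): the order of the entries
 * above is what gives earlier directories the lower, preferred rn_order
 * values during scanning; the ZPOOL_IMPORT_PATH environment variable
 * mentioned in fix_paths() can be set to supply an alternate search path,
 * which zfs_path_order() consults when ranking blkid-discovered devices.
 */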
1848 * Given a list of directories to search, find all pools stored on disk. This
1849 * includes partial pools which are not available to import. If no args are
1850 * given (argc is 0), then the default directory (/dev/dsk) is searched.
1851 * poolname or guid (but not both) are provided by the caller when trying
1852 * to import a specific pool.
1855 zpool_find_import_impl(libzfs_handle_t
*hdl
, importargs_t
*iarg
)
1857 nvlist_t
*ret
= NULL
;
1858 pool_list_t pools
= { 0 };
1859 pool_entry_t
*pe
, *penext
;
1860 vdev_entry_t
*ve
, *venext
;
1861 config_entry_t
*ce
, *cenext
;
1862 name_entry_t
*ne
, *nenext
;
1869 verify(iarg
->poolname
== NULL
|| iarg
->guid
== 0);
1870 mutex_init(&lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1873 * Locate pool member vdevs using libblkid or by directory scanning.
1874 * On success a newly allocated AVL tree which is populated with an
1875 * entry for each discovered vdev will be returned as the cache.
 * It's the caller's responsibility to consume and destroy this tree.
1878 if (iarg
->scan
|| iarg
->paths
!= 0) {
1879 int dirs
= iarg
->paths
;
1880 char **dir
= iarg
->path
;
1883 dir
= zpool_default_import_path
;
1884 dirs
= DEFAULT_IMPORT_PATH_SIZE
;
1887 if (zpool_find_import_scan(hdl
, &lock
, &cache
, dir
, dirs
) != 0)
1890 if (zpool_find_import_blkid(hdl
, &lock
, &cache
) != 0)
1895 * Create a thread pool to parallelize the process of reading and
1896 * validating labels, a large number of threads can be used due to
1897 * minimal contention.
1899 t
= taskq_create("z_import", 2 * boot_ncpus
, defclsyspri
,
1900 2 * boot_ncpus
, INT_MAX
, TASKQ_PREPOPULATE
);
1902 for (slice
= avl_first(cache
); slice
;
1903 (slice
= avl_walk(cache
, slice
, AVL_AFTER
)))
1904 (void) taskq_dispatch(t
, zpool_open_func
, slice
, TQ_SLEEP
);
 * Process the cache, filtering out any entries which are not
 * for the specified pool, then adding matching label configs.
1914 while ((slice
= avl_destroy_nodes(cache
, &cookie
)) != NULL
) {
1915 if (slice
->rn_config
!= NULL
) {
1916 nvlist_t
*config
= slice
->rn_config
;
1917 boolean_t matched
= B_TRUE
;
1920 if (iarg
->poolname
!= NULL
) {
1923 matched
= nvlist_lookup_string(config
,
1924 ZPOOL_CONFIG_POOL_NAME
, &pname
) == 0 &&
1925 strcmp(iarg
->poolname
, pname
) == 0;
1926 } else if (iarg
->guid
!= 0) {
1929 matched
= nvlist_lookup_uint64(config
,
1930 ZPOOL_CONFIG_POOL_GUID
, &this_guid
) == 0 &&
1931 iarg
->guid
== this_guid
;
1934 nvlist_free(config
);
1937 * Verify all remaining entries can be opened
1938 * exclusively. This will prune all underlying
1939 * multipath devices which otherwise could
1940 * result in the vdev appearing as UNAVAIL.
1942 * Under zdb, this step isn't required and
1943 * would prevent a zdb -e of active pools with
1946 fd
= open(slice
->rn_name
, O_RDONLY
| O_EXCL
);
1947 if (fd
>= 0 || iarg
->can_be_active
) {
1950 add_config(hdl
, &pools
,
1951 slice
->rn_name
, slice
->rn_order
,
1952 slice
->rn_num_labels
, config
);
1954 nvlist_free(config
);
1958 free(slice
->rn_name
);
1963 mutex_destroy(&lock
);
1965 ret
= get_configs(hdl
, &pools
, iarg
->can_be_active
);
1967 for (pe
= pools
.pools
; pe
!= NULL
; pe
= penext
) {
1968 penext
= pe
->pe_next
;
1969 for (ve
= pe
->pe_vdevs
; ve
!= NULL
; ve
= venext
) {
1970 venext
= ve
->ve_next
;
1971 for (ce
= ve
->ve_configs
; ce
!= NULL
; ce
= cenext
) {
1972 cenext
= ce
->ce_next
;
1973 nvlist_free(ce
->ce_config
);
1981 for (ne
= pools
.names
; ne
!= NULL
; ne
= nenext
) {
1982 nenext
= ne
->ne_next
;
1991 zpool_find_import(libzfs_handle_t
*hdl
, int argc
, char **argv
)
1993 importargs_t iarg
= { 0 };
1998 return (zpool_find_import_impl(hdl
, &iarg
));
2002 * Given a cache file, return the contents as a list of importable pools.
2003 * poolname or guid (but not both) are provided by the caller when trying
2004 * to import a specific pool.
2007 zpool_find_import_cached(libzfs_handle_t
*hdl
, const char *cachefile
,
2008 char *poolname
, uint64_t guid
)
2012 struct stat64 statbuf
;
2013 nvlist_t
*raw
, *src
, *dst
;
2020 verify(poolname
== NULL
|| guid
== 0);
2022 if ((fd
= open(cachefile
, O_RDONLY
)) < 0) {
2023 zfs_error_aux(hdl
, "%s", strerror(errno
));
2024 (void) zfs_error(hdl
, EZFS_BADCACHE
,
2025 dgettext(TEXT_DOMAIN
, "failed to open cache file"));
2029 if (fstat64(fd
, &statbuf
) != 0) {
2030 zfs_error_aux(hdl
, "%s", strerror(errno
));
2032 (void) zfs_error(hdl
, EZFS_BADCACHE
,
2033 dgettext(TEXT_DOMAIN
, "failed to get size of cache file"));
2037 if ((buf
= zfs_alloc(hdl
, statbuf
.st_size
)) == NULL
) {
2042 if (read(fd
, buf
, statbuf
.st_size
) != statbuf
.st_size
) {
2045 (void) zfs_error(hdl
, EZFS_BADCACHE
,
2046 dgettext(TEXT_DOMAIN
,
2047 "failed to read cache file contents"));
2053 if (nvlist_unpack(buf
, statbuf
.st_size
, &raw
, 0) != 0) {
2055 (void) zfs_error(hdl
, EZFS_BADCACHE
,
2056 dgettext(TEXT_DOMAIN
,
2057 "invalid or corrupt cache file contents"));
2064 * Go through and get the current state of the pools and refresh their
2067 if (nvlist_alloc(&pools
, 0, 0) != 0) {
2068 (void) no_memory(hdl
);
2074 while ((elem
= nvlist_next_nvpair(raw
, elem
)) != NULL
) {
2075 src
= fnvpair_value_nvlist(elem
);
2077 name
= fnvlist_lookup_string(src
, ZPOOL_CONFIG_POOL_NAME
);
2078 if (poolname
!= NULL
&& strcmp(poolname
, name
) != 0)
2081 this_guid
= fnvlist_lookup_uint64(src
, ZPOOL_CONFIG_POOL_GUID
);
2082 if (guid
!= 0 && guid
!= this_guid
)
2085 if (pool_active(hdl
, name
, this_guid
, &active
) != 0) {
2094 if ((dst
= refresh_config(hdl
, src
)) == NULL
) {
2100 if (nvlist_add_nvlist(pools
, nvpair_name(elem
), dst
) != 0) {
2101 (void) no_memory(hdl
);
2115 name_or_guid_exists(zpool_handle_t
*zhp
, void *data
)
2117 importargs_t
*import
= data
;
2120 if (import
->poolname
!= NULL
) {
2123 verify(nvlist_lookup_string(zhp
->zpool_config
,
2124 ZPOOL_CONFIG_POOL_NAME
, &pool_name
) == 0);
2125 if (strcmp(pool_name
, import
->poolname
) == 0)
2130 verify(nvlist_lookup_uint64(zhp
->zpool_config
,
2131 ZPOOL_CONFIG_POOL_GUID
, &pool_guid
) == 0);
2132 if (pool_guid
== import
->guid
)
2141 zpool_search_import(libzfs_handle_t
*hdl
, importargs_t
*import
)
2143 verify(import
->poolname
== NULL
|| import
->guid
== 0);
2146 import
->exists
= zpool_iter(hdl
, name_or_guid_exists
, import
);
2148 if (import
->cachefile
!= NULL
)
2149 return (zpool_find_import_cached(hdl
, import
->cachefile
,
2150 import
->poolname
, import
->guid
));
2152 return (zpool_find_import_impl(hdl
, import
));
2156 find_guid(nvlist_t
*nv
, uint64_t guid
)
2162 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
, &tmp
) == 0);
2166 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
2167 &child
, &children
) == 0) {
2168 for (c
= 0; c
< children
; c
++)
2169 if (find_guid(child
[c
], guid
))
2176 typedef struct aux_cbdata
{
2177 const char *cb_type
;
2179 zpool_handle_t
*cb_zhp
;
2183 find_aux(zpool_handle_t
*zhp
, void *data
)
2185 aux_cbdata_t
*cbp
= data
;
2191 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
2194 if (nvlist_lookup_nvlist_array(nvroot
, cbp
->cb_type
,
2195 &list
, &count
) == 0) {
2196 for (i
= 0; i
< count
; i
++) {
2197 verify(nvlist_lookup_uint64(list
[i
],
2198 ZPOOL_CONFIG_GUID
, &guid
) == 0);
2199 if (guid
== cbp
->cb_guid
) {
2211 * Determines if the pool is in use. If so, it returns true and the state of
2212 * the pool as well as the name of the pool. Both strings are allocated and
2213 * must be freed by the caller.
2216 zpool_in_use(libzfs_handle_t
*hdl
, int fd
, pool_state_t
*state
, char **namestr
,
2222 uint64_t guid
, vdev_guid
;
2223 zpool_handle_t
*zhp
;
2224 nvlist_t
*pool_config
;
2225 uint64_t stateval
, isspare
;
2226 aux_cbdata_t cb
= { 0 };
2231 if (zpool_read_label(fd
, &config
, NULL
) != 0) {
2232 (void) no_memory(hdl
);
2239 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_STATE
,
2241 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_GUID
,
2244 if (stateval
!= POOL_STATE_SPARE
&& stateval
!= POOL_STATE_L2CACHE
) {
2245 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_POOL_NAME
,
2247 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
2252 case POOL_STATE_EXPORTED
:
2254 * A pool with an exported state may in fact be imported
2255 * read-only, so check the in-core state to see if it's
2256 * active and imported read-only. If it is, set
2257 * its state to active.
2259 if (pool_active(hdl
, name
, guid
, &isactive
) == 0 && isactive
&&
2260 (zhp
= zpool_open_canfail(hdl
, name
)) != NULL
) {
2261 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_READONLY
, NULL
))
2262 stateval
= POOL_STATE_ACTIVE
;
2265 * All we needed the zpool handle for is the
2266 * readonly prop check.
2274 case POOL_STATE_ACTIVE
:
2276 * For an active pool, we have to determine if it's really part
2277 * of a currently active pool (in which case the pool will exist
2278 * and the guid will be the same), or whether it's part of an
 * active pool that was disconnected without being explicitly exported.
2282 if (pool_active(hdl
, name
, guid
, &isactive
) != 0) {
2283 nvlist_free(config
);
2289 * Because the device may have been removed while
2290 * offlined, we only report it as active if the vdev is
 * still present in the config.  Otherwise, pretend like it's not in use.
2294 if ((zhp
= zpool_open_canfail(hdl
, name
)) != NULL
&&
2295 (pool_config
= zpool_get_config(zhp
, NULL
))
2299 verify(nvlist_lookup_nvlist(pool_config
,
2300 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
2301 ret
= find_guid(nvroot
, vdev_guid
);
2307 * If this is an active spare within another pool, we
2308 * treat it like an unused hot spare. This allows the
 * user to create a pool with a hot spare that is currently
2310 * in use within another pool. Since we return B_TRUE,
2311 * libdiskmgt will continue to prevent generic consumers
2312 * from using the device.
2314 if (ret
&& nvlist_lookup_uint64(config
,
2315 ZPOOL_CONFIG_IS_SPARE
, &isspare
) == 0 && isspare
)
2316 stateval
= POOL_STATE_SPARE
;
2321 stateval
= POOL_STATE_POTENTIALLY_ACTIVE
;
2326 case POOL_STATE_SPARE
:
2328 * For a hot spare, it can be either definitively in use, or
2329 * potentially active. To determine if it's in use, we iterate
2330 * over all pools in the system and search for one with a spare
2331 * with a matching guid.
2333 * Due to the shared nature of spares, we don't actually report
2334 * the potentially active case as in use. This means the user
2335 * can freely create pools on the hot spares of exported pools,
2336 * but to do otherwise makes the resulting code complicated, and
2337 * we end up having to deal with this case anyway.
2340 cb
.cb_guid
= vdev_guid
;
2341 cb
.cb_type
= ZPOOL_CONFIG_SPARES
;
2342 if (zpool_iter(hdl
, find_aux
, &cb
) == 1) {
2343 name
= (char *)zpool_get_name(cb
.cb_zhp
);
2350 case POOL_STATE_L2CACHE
:
2353 * Check if any pool is currently using this l2cache device.
2356 cb
.cb_guid
= vdev_guid
;
2357 cb
.cb_type
= ZPOOL_CONFIG_L2CACHE
;
2358 if (zpool_iter(hdl
, find_aux
, &cb
) == 1) {
2359 name
= (char *)zpool_get_name(cb
.cb_zhp
);
2372 if ((*namestr
= zfs_strdup(hdl
, name
)) == NULL
) {
2374 zpool_close(cb
.cb_zhp
);
2375 nvlist_free(config
);
2378 *state
= (pool_state_t
)stateval
;
2382 zpool_close(cb
.cb_zhp
);
2384 nvlist_free(config
);