/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"
/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
/*
 * Derive a devid string for the device at 'path', or return NULL if one
 * cannot be obtained.
 */
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	/*
	 * If this is an interior vdev, recursively fix up all of its children.
	 */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}
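
/*
 * Illustrative sketch (not part of the original source): the "most similar"
 * test used by fix_paths() above is simply the number of characters two paths
 * share at their ends.  For an existing path of "/dev/dsk/c0t0d0s4" and a
 * candidate name of "/dev/dsk/c1t2d3s4", the shared suffix is "s4", so the
 * count is 2; a candidate ending in a different slice number scores lower and
 * is not chosen.  The helper name is hypothetical and the block is excluded
 * from compilation.
 */
#if 0
static int
example_suffix_match(const char *a, const char *b)
{
	const char *pa = a + strlen(a) - 1;
	const char *pb = b + strlen(b) - 1;
	int count = 0;

	while (pa >= a && pb >= b && *pa == *pb) {
		pa--;
		pb--;
		count++;
	}
	return (count);
}
#endif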
/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		nvlist_free(config);
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.
	 * If so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary
	 * before doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}
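
/*
 * Illustrative sketch (not part of the original source): when the same label
 * is visible at two paths (multipathed disks or overlapping slices),
 * add_config() is simply called once per path.  The duplicate
 * (pool guid, toplevel vdev guid, txg) tuple is dropped, but both paths are
 * recorded as name entries so that fix_paths() can later choose the best one.
 * The paths and nvlist variables below are hypothetical; the block is
 * excluded from compilation.
 */
#if 0
	/* both nvlists were unpacked from the same on-disk label */
	(void) add_config(hdl, &pools, "/dev/dsk/c1t0d0s0", config_a);
	(void) add_config(hdl, &pools, "/dev/dsk/c2t0d0s0", config_b);
#endif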
/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		(void) zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot discover pools"));
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint64_t id;
	uint64_t hostid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t c;
	boolean_t isactive;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL)
			goto error;

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
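
/*
 * Illustrative sketch (not part of the original source): ZFS keeps
 * VDEV_LABELS (4) copies of the label, two at the front of the device and two
 * at the end.  Assuming the usual 256 KB vdev_label_t and a hypothetical
 * aligned device size of 1 GB, label_offset() yields:
 *
 *	l = 0:	0
 *	l = 1:	256 KB
 *	l = 2:	1 GB - 512 KB
 *	l = 3:	1 GB - 256 KB
 *
 * The block below is excluded from compilation.
 */
#if 0
	uint64_t size = 1ULL << 30;	/* hypothetical aligned device size */
	int l;

	for (l = 0; l < VDEV_LABELS; l++)
		(void) printf("label %d at offset %llu\n", l,
		    (u_longlong_t)label_offset(size, l));
#endif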
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}
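
/*
 * Illustrative sketch (not part of the original source): a caller would
 * typically open a device node read-only, hand the descriptor to
 * zpool_read_label(), and inspect the returned nvlist.  The device path below
 * is hypothetical; the block is excluded from compilation.
 */
#if 0
	int fd;
	nvlist_t *config;
	char *poolname;

	if ((fd = open("/dev/rdsk/c1t0d0s0", O_RDONLY)) >= 0) {
		if (zpool_read_label(fd, &config) == 0 && config != NULL) {
			if (nvlist_lookup_string(config,
			    ZPOOL_CONFIG_POOL_NAME, &poolname) == 0)
				/* the device carries a label for 'poolname' */;
			nvlist_free(config);
		}
		(void) close(fd);
	}
#endif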
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
    boolean_t active_ok, char *poolname, uint64_t guid)
{
	int i;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	verify(poolname == NULL || guid == 0);

	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		char rdsk[MAXPATHLEN];
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(argv[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			(void) strlcpy(rdsk, "/dev/rdsk", sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			(void) snprintf(path, sizeof (path), "%s/%s",
			    rdsk, name);

			if ((fd = open(path, O_RDONLY)) < 0)
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files, character devs and block devs.
			 */
			if (fstat64(fd, &statbuf) != 0 ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISCHR(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode))) {
				(void) close(fd);
				continue;
			}

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;

				if (poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(poolname, pname) == 0;
				} else if (guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools, active_ok);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, 0));
}

nvlist_t *
zpool_find_import_byname(libzfs_handle_t *hdl, int argc, char **argv,
    char *pool)
{
	return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, pool, 0));
}

nvlist_t *
zpool_find_import_byguid(libzfs_handle_t *hdl, int argc, char **argv,
    uint64_t guid)
{
	return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, guid));
}

nvlist_t *
zpool_find_import_activeok(libzfs_handle_t *hdl, int argc, char **argv)
{
	return (zpool_find_import_impl(hdl, argc, argv, B_TRUE, NULL, 0));
}
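
/*
 * Illustrative sketch (not part of the original source): discovering
 * importable pools and iterating over the returned nvlist.  Passing argc == 0
 * searches the default /dev/dsk directory, as described above.  Excluded from
 * compilation.
 */
#if 0
	libzfs_handle_t *hdl = libzfs_init();
	nvlist_t *pools, *config;
	nvpair_t *elem = NULL;

	if (hdl != NULL &&
	    (pools = zpool_find_import(hdl, 0, NULL)) != NULL) {
		while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
			verify(nvpair_value_nvlist(elem, &config) == 0);
			/*
			 * nvpair_name(elem) is the pool name; 'config' is the
			 * assembled configuration for that pool.
			 */
		}
		nvlist_free(pools);
	}
	if (hdl != NULL)
		libzfs_fini(hdl);
#endif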
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0) {
			verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
			    &this_guid) == 0);
			if (guid != this_guid)
				continue;
		}

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
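
/*
 * Illustrative sketch (not part of the original source): reading importable
 * pools from a cache file instead of scanning devices.  The path below is the
 * conventional default cache file; callers may pass any file written via the
 * 'cachefile' pool property.  Assumes an initialized libzfs handle 'hdl';
 * excluded from compilation.
 */
#if 0
	nvlist_t *pools;

	if ((pools = zpool_find_import_cached(hdl, "/etc/zfs/zpool.cache",
	    NULL, 0)) != NULL) {
		/* same nvlist-of-configs shape as zpool_find_import() */
		nvlist_free(pools);
	}
#endif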
/*
 * Return B_TRUE if the given guid corresponds to a vdev anywhere in the given
 * vdev tree.
 */
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

/*
 * zpool_iter() callback: check whether the pool has an aux device (spare or
 * l2cache, depending on cb_type) with the given guid.  If so, keep the pool
 * handle open in cb_zhp and stop iteration.
 */
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	nvlist_t *nvroot;
	uint_t i, count;
	uint64_t guid;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;

	return (0);
}
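
/*
 * Illustrative sketch (not part of the original source): a consumer such as
 * zpool(1M) or libdiskmgt would call zpool_in_use() on an open device to
 * decide whether it is safe to reuse.  The device path is hypothetical and an
 * initialized libzfs handle 'hdl' is assumed; excluded from compilation.
 */
#if 0
	pool_state_t state;
	char *name;
	boolean_t inuse;
	int fd;

	if ((fd = open("/dev/rdsk/c1t0d0s0", O_RDONLY)) >= 0) {
		if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 &&
		    inuse) {
			/* device belongs to pool 'name' in state 'state' */
			free(name);
		}
		(void) close(fd);
	}
#endif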