4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
40 #include <sys/efi_partition.h>
42 #include <sys/zfs_ioctl.h>
46 #include "zfs_namecheck.h"
48 #include "libzfs_impl.h"
50 static int read_efi_label(nvlist_t
*config
, diskaddr_t
*sb
);
52 #if defined(__i386) || defined(__amd64)
53 #define BOOTCMD "installgrub(1M)"
55 #define BOOTCMD "installboot(1M)"
59 * ====================================================================
60 * zpool property functions
61 * ====================================================================
65 zpool_get_all_props(zpool_handle_t
*zhp
)
68 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
70 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
72 if (zcmd_alloc_dst_nvlist(hdl
, &zc
, 0) != 0)
75 while (ioctl(hdl
->libzfs_fd
, ZFS_IOC_POOL_GET_PROPS
, &zc
) != 0) {
76 if (errno
== ENOMEM
) {
77 if (zcmd_expand_dst_nvlist(hdl
, &zc
) != 0) {
78 zcmd_free_nvlists(&zc
);
82 zcmd_free_nvlists(&zc
);
87 if (zcmd_read_dst_nvlist(hdl
, &zc
, &zhp
->zpool_props
) != 0) {
88 zcmd_free_nvlists(&zc
);
92 zcmd_free_nvlists(&zc
);
98 zpool_props_refresh(zpool_handle_t
*zhp
)
102 old_props
= zhp
->zpool_props
;
104 if (zpool_get_all_props(zhp
) != 0)
107 nvlist_free(old_props
);
112 zpool_get_prop_string(zpool_handle_t
*zhp
, zpool_prop_t prop
,
118 zprop_source_t source
;
120 nvl
= zhp
->zpool_props
;
121 if (nvlist_lookup_nvlist(nvl
, zpool_prop_to_name(prop
), &nv
) == 0) {
122 verify(nvlist_lookup_uint64(nv
, ZPROP_SOURCE
, &ival
) == 0);
124 verify(nvlist_lookup_string(nv
, ZPROP_VALUE
, &value
) == 0);
126 source
= ZPROP_SRC_DEFAULT
;
127 if ((value
= (char *)zpool_prop_default_string(prop
)) == NULL
)
138 zpool_get_prop_int(zpool_handle_t
*zhp
, zpool_prop_t prop
, zprop_source_t
*src
)
142 zprop_source_t source
;
144 if (zhp
->zpool_props
== NULL
&& zpool_get_all_props(zhp
)) {
146 * zpool_get_all_props() has most likely failed because
147 * the pool is faulted, but if all we need is the top level
148 * vdev's guid then get it from the zhp config nvlist.
150 if ((prop
== ZPOOL_PROP_GUID
) &&
151 (nvlist_lookup_nvlist(zhp
->zpool_config
,
152 ZPOOL_CONFIG_VDEV_TREE
, &nv
) == 0) &&
153 (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
, &value
)
157 return (zpool_prop_default_numeric(prop
));
160 nvl
= zhp
->zpool_props
;
161 if (nvlist_lookup_nvlist(nvl
, zpool_prop_to_name(prop
), &nv
) == 0) {
162 verify(nvlist_lookup_uint64(nv
, ZPROP_SOURCE
, &value
) == 0);
164 verify(nvlist_lookup_uint64(nv
, ZPROP_VALUE
, &value
) == 0);
166 source
= ZPROP_SRC_DEFAULT
;
167 value
= zpool_prop_default_numeric(prop
);
177 * Map VDEV STATE to printed strings.
180 zpool_state_to_name(vdev_state_t state
, vdev_aux_t aux
)
183 case VDEV_STATE_CLOSED
:
184 case VDEV_STATE_OFFLINE
:
185 return (gettext("OFFLINE"));
186 case VDEV_STATE_REMOVED
:
187 return (gettext("REMOVED"));
188 case VDEV_STATE_CANT_OPEN
:
189 if (aux
== VDEV_AUX_CORRUPT_DATA
|| aux
== VDEV_AUX_BAD_LOG
)
190 return (gettext("FAULTED"));
192 return (gettext("UNAVAIL"));
193 case VDEV_STATE_FAULTED
:
194 return (gettext("FAULTED"));
195 case VDEV_STATE_DEGRADED
:
196 return (gettext("DEGRADED"));
197 case VDEV_STATE_HEALTHY
:
198 return (gettext("ONLINE"));
201 return (gettext("UNKNOWN"));
205 * Get a zpool property value for 'prop' and return the value in
206 * a pre-allocated buffer.
209 zpool_get_prop(zpool_handle_t
*zhp
, zpool_prop_t prop
, char *buf
, size_t len
,
210 zprop_source_t
*srctype
)
214 zprop_source_t src
= ZPROP_SRC_NONE
;
219 if (zpool_get_state(zhp
) == POOL_STATE_UNAVAIL
) {
220 if (prop
== ZPOOL_PROP_NAME
)
221 (void) strlcpy(buf
, zpool_get_name(zhp
), len
);
222 else if (prop
== ZPOOL_PROP_HEALTH
)
223 (void) strlcpy(buf
, "FAULTED", len
);
225 (void) strlcpy(buf
, "-", len
);
229 if (zhp
->zpool_props
== NULL
&& zpool_get_all_props(zhp
) &&
230 prop
!= ZPOOL_PROP_NAME
)
233 switch (zpool_prop_get_type(prop
)) {
234 case PROP_TYPE_STRING
:
235 (void) strlcpy(buf
, zpool_get_prop_string(zhp
, prop
, &src
),
239 case PROP_TYPE_NUMBER
:
240 intval
= zpool_get_prop_int(zhp
, prop
, &src
);
243 case ZPOOL_PROP_SIZE
:
244 case ZPOOL_PROP_USED
:
245 case ZPOOL_PROP_AVAILABLE
:
246 (void) zfs_nicenum(intval
, buf
, len
);
249 case ZPOOL_PROP_CAPACITY
:
250 (void) snprintf(buf
, len
, "%llu%%",
251 (u_longlong_t
)intval
);
254 case ZPOOL_PROP_HEALTH
:
255 verify(nvlist_lookup_nvlist(zpool_get_config(zhp
, NULL
),
256 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
257 verify(nvlist_lookup_uint64_array(nvroot
,
258 ZPOOL_CONFIG_STATS
, (uint64_t **)&vs
, &vsc
) == 0);
260 (void) strlcpy(buf
, zpool_state_to_name(intval
,
264 (void) snprintf(buf
, len
, "%llu", intval
);
268 case PROP_TYPE_INDEX
:
269 intval
= zpool_get_prop_int(zhp
, prop
, &src
);
270 if (zpool_prop_index_to_string(prop
, intval
, &strval
)
273 (void) strlcpy(buf
, strval
, len
);
287 * Check if the bootfs name has the same pool name as it is set to.
288 * Assuming bootfs is a valid dataset name.
291 bootfs_name_valid(const char *pool
, char *bootfs
)
293 int len
= strlen(pool
);
295 if (!zfs_name_valid(bootfs
, ZFS_TYPE_FILESYSTEM
|ZFS_TYPE_SNAPSHOT
))
298 if (strncmp(pool
, bootfs
, len
) == 0 &&
299 (bootfs
[len
] == '/' || bootfs
[len
] == '\0'))
306 * Inspect the configuration to determine if any of the devices contain
310 pool_uses_efi(nvlist_t
*config
)
315 if (nvlist_lookup_nvlist_array(config
, ZPOOL_CONFIG_CHILDREN
,
316 &child
, &children
) != 0)
317 return (read_efi_label(config
, NULL
) >= 0);
319 for (c
= 0; c
< children
; c
++) {
320 if (pool_uses_efi(child
[c
]))
327 pool_is_bootable(zpool_handle_t
*zhp
)
329 char bootfs
[ZPOOL_MAXNAMELEN
];
331 return (zpool_get_prop(zhp
, ZPOOL_PROP_BOOTFS
, bootfs
,
332 sizeof (bootfs
), NULL
) == 0 && strncmp(bootfs
, "-",
333 sizeof (bootfs
)) != 0);
338 * Given an nvlist of zpool properties to be set, validate that they are
339 * correct, and parse any numeric properties (index, boolean, etc) if they are
340 * specified as strings.
343 zpool_valid_proplist(libzfs_handle_t
*hdl
, const char *poolname
,
344 nvlist_t
*props
, uint64_t version
, boolean_t create_or_import
, char *errbuf
)
352 struct stat64 statbuf
;
356 if (nvlist_alloc(&retprops
, NV_UNIQUE_NAME
, 0) != 0) {
357 (void) no_memory(hdl
);
362 while ((elem
= nvlist_next_nvpair(props
, elem
)) != NULL
) {
363 const char *propname
= nvpair_name(elem
);
366 * Make sure this property is valid and applies to this type.
368 if ((prop
= zpool_name_to_prop(propname
)) == ZPROP_INVAL
) {
369 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
370 "invalid property '%s'"), propname
);
371 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
375 if (zpool_prop_readonly(prop
)) {
376 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "'%s' "
377 "is readonly"), propname
);
378 (void) zfs_error(hdl
, EZFS_PROPREADONLY
, errbuf
);
382 if (zprop_parse_value(hdl
, elem
, prop
, ZFS_TYPE_POOL
, retprops
,
383 &strval
, &intval
, errbuf
) != 0)
387 * Perform additional checking for specific properties.
390 case ZPOOL_PROP_VERSION
:
391 if (intval
< version
|| intval
> SPA_VERSION
) {
392 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
393 "property '%s' number %d is invalid."),
395 (void) zfs_error(hdl
, EZFS_BADVERSION
, errbuf
);
400 case ZPOOL_PROP_BOOTFS
:
401 if (create_or_import
) {
402 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
403 "property '%s' cannot be set at creation "
404 "or import time"), propname
);
405 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
409 if (version
< SPA_VERSION_BOOTFS
) {
410 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
411 "pool must be upgraded to support "
412 "'%s' property"), propname
);
413 (void) zfs_error(hdl
, EZFS_BADVERSION
, errbuf
);
418 * bootfs property value has to be a dataset name and
419 * the dataset has to be in the same pool as it sets to.
421 if (strval
[0] != '\0' && !bootfs_name_valid(poolname
,
423 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "'%s' "
424 "is an invalid name"), strval
);
425 (void) zfs_error(hdl
, EZFS_INVALIDNAME
, errbuf
);
429 if ((zhp
= zpool_open_canfail(hdl
, poolname
)) == NULL
) {
430 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
431 "could not open pool '%s'"), poolname
);
432 (void) zfs_error(hdl
, EZFS_OPENFAILED
, errbuf
);
435 verify(nvlist_lookup_nvlist(zpool_get_config(zhp
, NULL
),
436 ZPOOL_CONFIG_VDEV_TREE
, &nvroot
) == 0);
439 * bootfs property cannot be set on a disk which has
442 if (pool_uses_efi(nvroot
)) {
443 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
444 "property '%s' not supported on "
445 "EFI labeled devices"), propname
);
446 (void) zfs_error(hdl
, EZFS_POOL_NOTSUP
, errbuf
);
453 case ZPOOL_PROP_ALTROOT
:
454 if (!create_or_import
) {
455 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
456 "property '%s' can only be set during pool "
457 "creation or import"), propname
);
458 (void) zfs_error(hdl
, EZFS_BADPROP
, errbuf
);
462 if (strval
[0] != '/') {
463 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
464 "bad alternate root '%s'"), strval
);
465 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
470 case ZPOOL_PROP_CACHEFILE
:
471 if (strval
[0] == '\0')
474 if (strcmp(strval
, "none") == 0)
477 if (strval
[0] != '/') {
478 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
479 "property '%s' must be empty, an "
480 "absolute path, or 'none'"), propname
);
481 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
485 slash
= strrchr(strval
, '/');
487 if (slash
[1] == '\0' || strcmp(slash
, "/.") == 0 ||
488 strcmp(slash
, "/..") == 0) {
489 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
490 "'%s' is not a valid file"), strval
);
491 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
497 if (strval
[0] != '\0' &&
498 (stat64(strval
, &statbuf
) != 0 ||
499 !S_ISDIR(statbuf
.st_mode
))) {
500 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
501 "'%s' is not a valid directory"),
503 (void) zfs_error(hdl
, EZFS_BADPATH
, errbuf
);
514 nvlist_free(retprops
);
519 * Set zpool property : propname=propval.
522 zpool_set_prop(zpool_handle_t
*zhp
, const char *propname
, const char *propval
)
524 zfs_cmd_t zc
= { 0 };
527 nvlist_t
*nvl
= NULL
;
531 (void) snprintf(errbuf
, sizeof (errbuf
),
532 dgettext(TEXT_DOMAIN
, "cannot set property for '%s'"),
535 if (zhp
->zpool_props
== NULL
&& zpool_get_all_props(zhp
))
536 return (zfs_error(zhp
->zpool_hdl
, EZFS_POOLPROPS
, errbuf
));
538 if (nvlist_alloc(&nvl
, NV_UNIQUE_NAME
, 0) != 0)
539 return (no_memory(zhp
->zpool_hdl
));
541 if (nvlist_add_string(nvl
, propname
, propval
) != 0) {
543 return (no_memory(zhp
->zpool_hdl
));
546 version
= zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
);
547 if ((realprops
= zpool_valid_proplist(zhp
->zpool_hdl
,
548 zhp
->zpool_name
, nvl
, version
, B_FALSE
, errbuf
)) == NULL
) {
557 * Execute the corresponding ioctl() to set this property.
559 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
561 if (zcmd_write_src_nvlist(zhp
->zpool_hdl
, &zc
, nvl
) != 0) {
566 ret
= zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_SET_PROPS
, &zc
);
568 zcmd_free_nvlists(&zc
);
572 (void) zpool_standard_error(zhp
->zpool_hdl
, errno
, errbuf
);
574 (void) zpool_props_refresh(zhp
);
580 zpool_expand_proplist(zpool_handle_t
*zhp
, zprop_list_t
**plp
)
582 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
584 char buf
[ZFS_MAXPROPLEN
];
586 if (zprop_expand_list(hdl
, plp
, ZFS_TYPE_POOL
) != 0)
589 for (entry
= *plp
; entry
!= NULL
; entry
= entry
->pl_next
) {
594 if (entry
->pl_prop
!= ZPROP_INVAL
&&
595 zpool_get_prop(zhp
, entry
->pl_prop
, buf
, sizeof (buf
),
597 if (strlen(buf
) > entry
->pl_width
)
598 entry
->pl_width
= strlen(buf
);
607 * Validate the given pool name, optionally putting an extended error message in
611 zpool_name_valid(libzfs_handle_t
*hdl
, boolean_t isopen
, const char *pool
)
617 ret
= pool_namecheck(pool
, &why
, &what
);
620 * The rules for reserved pool names were extended at a later point.
621 * But we need to support users with existing pools that may now be
622 * invalid. So we only check for this expanded set of names during a
623 * create (or import), and only in userland.
625 if (ret
== 0 && !isopen
&&
626 (strncmp(pool
, "mirror", 6) == 0 ||
627 strncmp(pool
, "raidz", 5) == 0 ||
628 strncmp(pool
, "spare", 5) == 0 ||
629 strcmp(pool
, "log") == 0)) {
632 dgettext(TEXT_DOMAIN
, "name is reserved"));
640 case NAME_ERR_TOOLONG
:
642 dgettext(TEXT_DOMAIN
, "name is too long"));
645 case NAME_ERR_INVALCHAR
:
647 dgettext(TEXT_DOMAIN
, "invalid character "
648 "'%c' in pool name"), what
);
651 case NAME_ERR_NOLETTER
:
652 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
653 "name must begin with a letter"));
656 case NAME_ERR_RESERVED
:
657 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
658 "name is reserved"));
661 case NAME_ERR_DISKLIKE
:
662 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
663 "pool name is reserved"));
666 case NAME_ERR_LEADING_SLASH
:
667 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
668 "leading slash in name"));
671 case NAME_ERR_EMPTY_COMPONENT
:
672 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
673 "empty component in name"));
676 case NAME_ERR_TRAILING_SLASH
:
677 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
678 "trailing slash in name"));
681 case NAME_ERR_MULTIPLE_AT
:
682 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
683 "multiple '@' delimiters in name"));
695 * Open a handle to the given pool, even if the pool is currently in the FAULTED
699 zpool_open_canfail(libzfs_handle_t
*hdl
, const char *pool
)
705 * Make sure the pool name is valid.
707 if (!zpool_name_valid(hdl
, B_TRUE
, pool
)) {
708 (void) zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
709 dgettext(TEXT_DOMAIN
, "cannot open '%s'"),
714 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
717 zhp
->zpool_hdl
= hdl
;
718 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
720 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
726 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "no such pool"));
727 (void) zfs_error_fmt(hdl
, EZFS_NOENT
,
728 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), pool
);
737 * Like the above, but silent on error. Used when iterating over pools (because
738 * the configuration cache may be out of date).
741 zpool_open_silent(libzfs_handle_t
*hdl
, const char *pool
, zpool_handle_t
**ret
)
746 if ((zhp
= zfs_alloc(hdl
, sizeof (zpool_handle_t
))) == NULL
)
749 zhp
->zpool_hdl
= hdl
;
750 (void) strlcpy(zhp
->zpool_name
, pool
, sizeof (zhp
->zpool_name
));
752 if (zpool_refresh_stats(zhp
, &missing
) != 0) {
768 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
772 zpool_open(libzfs_handle_t
*hdl
, const char *pool
)
776 if ((zhp
= zpool_open_canfail(hdl
, pool
)) == NULL
)
779 if (zhp
->zpool_state
== POOL_STATE_UNAVAIL
) {
780 (void) zfs_error_fmt(hdl
, EZFS_POOLUNAVAIL
,
781 dgettext(TEXT_DOMAIN
, "cannot open '%s'"), zhp
->zpool_name
);
790 * Close the handle. Simply frees the memory associated with the handle.
793 zpool_close(zpool_handle_t
*zhp
)
795 if (zhp
->zpool_config
)
796 nvlist_free(zhp
->zpool_config
);
797 if (zhp
->zpool_old_config
)
798 nvlist_free(zhp
->zpool_old_config
);
799 if (zhp
->zpool_props
)
800 nvlist_free(zhp
->zpool_props
);
805 * Return the name of the pool.
808 zpool_get_name(zpool_handle_t
*zhp
)
810 return (zhp
->zpool_name
);
815 * Return the state of the pool (ACTIVE or UNAVAILABLE)
818 zpool_get_state(zpool_handle_t
*zhp
)
820 return (zhp
->zpool_state
);
824 * Create the named pool, using the provided vdev list. It is assumed
825 * that the consumer has already validated the contents of the nvlist, so we
826 * don't have to worry about error semantics.
829 zpool_create(libzfs_handle_t
*hdl
, const char *pool
, nvlist_t
*nvroot
,
830 nvlist_t
*props
, nvlist_t
*fsprops
)
832 zfs_cmd_t zc
= { 0 };
833 nvlist_t
*zc_fsprops
= NULL
;
834 nvlist_t
*zc_props
= NULL
;
839 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
840 "cannot create '%s'"), pool
);
842 if (!zpool_name_valid(hdl
, B_FALSE
, pool
))
843 return (zfs_error(hdl
, EZFS_INVALIDNAME
, msg
));
845 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
849 if ((zc_props
= zpool_valid_proplist(hdl
, pool
, props
,
850 SPA_VERSION_1
, B_TRUE
, msg
)) == NULL
) {
859 zoned
= ((nvlist_lookup_string(fsprops
,
860 zfs_prop_to_name(ZFS_PROP_ZONED
), &zonestr
) == 0) &&
861 strcmp(zonestr
, "on") == 0);
863 if ((zc_fsprops
= zfs_valid_proplist(hdl
,
864 ZFS_TYPE_FILESYSTEM
, fsprops
, zoned
, NULL
, msg
)) == NULL
) {
868 (nvlist_alloc(&zc_props
, NV_UNIQUE_NAME
, 0) != 0)) {
871 if (nvlist_add_nvlist(zc_props
,
872 ZPOOL_ROOTFS_PROPS
, zc_fsprops
) != 0) {
877 if (zc_props
&& zcmd_write_src_nvlist(hdl
, &zc
, zc_props
) != 0)
880 (void) strlcpy(zc
.zc_name
, pool
, sizeof (zc
.zc_name
));
882 if ((ret
= zfs_ioctl(hdl
, ZFS_IOC_POOL_CREATE
, &zc
)) != 0) {
884 zcmd_free_nvlists(&zc
);
885 nvlist_free(zc_props
);
886 nvlist_free(zc_fsprops
);
891 * This can happen if the user has specified the same
892 * device multiple times. We can't reliably detect this
893 * until we try to add it and see we already have a
896 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
897 "one or more vdevs refer to the same device"));
898 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
902 * This occurs when one of the devices is below
903 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
904 * device was the problem device since there's no
905 * reliable way to determine device size from userland.
910 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
912 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
913 "one or more devices is less than the "
914 "minimum size (%s)"), buf
);
916 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
919 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
920 "one or more devices is out of space"));
921 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
924 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
925 "cache device must be a disk or disk slice"));
926 return (zfs_error(hdl
, EZFS_BADDEV
, msg
));
929 return (zpool_standard_error(hdl
, errno
, msg
));
934 * If this is an alternate root pool, then we automatically set the
935 * mountpoint of the root dataset to be '/'.
937 if (nvlist_lookup_string(props
, zpool_prop_to_name(ZPOOL_PROP_ALTROOT
),
941 verify((zhp
= zfs_open(hdl
, pool
, ZFS_TYPE_DATASET
)) != NULL
);
942 verify(zfs_prop_set(zhp
, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT
),
949 zcmd_free_nvlists(&zc
);
950 nvlist_free(zc_props
);
951 nvlist_free(zc_fsprops
);
956 * Destroy the given pool. It is up to the caller to ensure that there are no
957 * datasets left in the pool.
960 zpool_destroy(zpool_handle_t
*zhp
)
962 zfs_cmd_t zc
= { 0 };
963 zfs_handle_t
*zfp
= NULL
;
964 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
967 if (zhp
->zpool_state
== POOL_STATE_ACTIVE
&&
968 (zfp
= zfs_open(zhp
->zpool_hdl
, zhp
->zpool_name
,
969 ZFS_TYPE_FILESYSTEM
)) == NULL
)
972 if (zpool_remove_zvol_links(zhp
) != 0)
975 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
977 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_DESTROY
, &zc
) != 0) {
978 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
979 "cannot destroy '%s'"), zhp
->zpool_name
);
981 if (errno
== EROFS
) {
982 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
983 "one or more devices is read only"));
984 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
986 (void) zpool_standard_error(hdl
, errno
, msg
);
995 remove_mountpoint(zfp
);
1003 * Add the given vdevs to the pool. The caller must have already performed the
1004 * necessary verification to ensure that the vdev specification is well-formed.
1007 zpool_add(zpool_handle_t
*zhp
, nvlist_t
*nvroot
)
1009 zfs_cmd_t zc
= { 0 };
1011 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1013 nvlist_t
**spares
, **l2cache
;
1014 uint_t nspares
, nl2cache
;
1016 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1017 "cannot add to '%s'"), zhp
->zpool_name
);
1019 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
1020 SPA_VERSION_SPARES
&&
1021 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_SPARES
,
1022 &spares
, &nspares
) == 0) {
1023 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
1024 "upgraded to add hot spares"));
1025 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
1028 if (pool_is_bootable(zhp
) && nvlist_lookup_nvlist_array(nvroot
,
1029 ZPOOL_CONFIG_SPARES
, &spares
, &nspares
) == 0) {
1032 for (s
= 0; s
< nspares
; s
++) {
1035 if (nvlist_lookup_string(spares
[s
], ZPOOL_CONFIG_PATH
,
1036 &path
) == 0 && pool_uses_efi(spares
[s
])) {
1037 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1038 "device '%s' contains an EFI label and "
1039 "cannot be used on root pools."),
1040 zpool_vdev_name(hdl
, NULL
, spares
[s
]));
1041 return (zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
));
1046 if (zpool_get_prop_int(zhp
, ZPOOL_PROP_VERSION
, NULL
) <
1047 SPA_VERSION_L2CACHE
&&
1048 nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_L2CACHE
,
1049 &l2cache
, &nl2cache
) == 0) {
1050 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "pool must be "
1051 "upgraded to add cache devices"));
1052 return (zfs_error(hdl
, EZFS_BADVERSION
, msg
));
1055 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1057 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1059 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_VDEV_ADD
, &zc
) != 0) {
1063 * This can happen if the user has specified the same
1064 * device multiple times. We can't reliably detect this
1065 * until we try to add it and see we already have a
1068 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1069 "one or more vdevs refer to the same device"));
1070 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1075 * This occurrs when one of the devices is below
1076 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1077 * device was the problem device since there's no
1078 * reliable way to determine device size from userland.
1083 zfs_nicenum(SPA_MINDEVSIZE
, buf
, sizeof (buf
));
1085 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1086 "device is less than the minimum "
1089 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1093 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1094 "pool must be upgraded to add these vdevs"));
1095 (void) zfs_error(hdl
, EZFS_BADVERSION
, msg
);
1099 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1100 "root pool can not have multiple vdevs"
1101 " or separate logs"));
1102 (void) zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
);
1106 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1107 "cache device must be a disk or disk slice"));
1108 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1112 (void) zpool_standard_error(hdl
, errno
, msg
);
1120 zcmd_free_nvlists(&zc
);
1126 * Exports the pool from the system. The caller must ensure that there are no
1127 * mounted datasets in the pool.
1130 zpool_export(zpool_handle_t
*zhp
, boolean_t force
)
1132 zfs_cmd_t zc
= { 0 };
1135 if (zpool_remove_zvol_links(zhp
) != 0)
1138 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1139 "cannot export '%s'"), zhp
->zpool_name
);
1141 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1142 zc
.zc_cookie
= force
;
1144 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_EXPORT
, &zc
) != 0) {
1147 zfs_error_aux(zhp
->zpool_hdl
, dgettext(TEXT_DOMAIN
,
1148 "use '-f' to override the following errors:\n"
1149 "'%s' has an active shared spare which could be"
1150 " used by other pools once '%s' is exported."),
1151 zhp
->zpool_name
, zhp
->zpool_name
);
1152 return (zfs_error(zhp
->zpool_hdl
, EZFS_ACTIVE_SPARE
,
1155 return (zpool_standard_error_fmt(zhp
->zpool_hdl
, errno
,
1164 * zpool_import() is a contracted interface. Should be kept the same
1167 * Applications should use zpool_import_props() to import a pool with
1168 * new properties value to be set.
1171 zpool_import(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1174 nvlist_t
*props
= NULL
;
1177 if (altroot
!= NULL
) {
1178 if (nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) != 0) {
1179 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1180 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1184 if (nvlist_add_string(props
,
1185 zpool_prop_to_name(ZPOOL_PROP_ALTROOT
), altroot
) != 0 ||
1186 nvlist_add_string(props
,
1187 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE
), "none") != 0) {
1189 return (zfs_error_fmt(hdl
, EZFS_NOMEM
,
1190 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1195 ret
= zpool_import_props(hdl
, config
, newname
, props
, B_FALSE
);
1202 * Import the given pool using the known configuration and a list of
1203 * properties to be set. The configuration should have come from
1204 * zpool_find_import(). The 'newname' parameters control whether the pool
1205 * is imported with a different name.
1208 zpool_import_props(libzfs_handle_t
*hdl
, nvlist_t
*config
, const char *newname
,
1209 nvlist_t
*props
, boolean_t importfaulted
)
1211 zfs_cmd_t zc
= { 0 };
1217 verify(nvlist_lookup_string(config
, ZPOOL_CONFIG_POOL_NAME
,
1220 (void) snprintf(errbuf
, sizeof (errbuf
), dgettext(TEXT_DOMAIN
,
1221 "cannot import pool '%s'"), origname
);
1223 if (newname
!= NULL
) {
1224 if (!zpool_name_valid(hdl
, B_FALSE
, newname
))
1225 return (zfs_error_fmt(hdl
, EZFS_INVALIDNAME
,
1226 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1228 thename
= (char *)newname
;
1236 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_VERSION
,
1239 if ((props
= zpool_valid_proplist(hdl
, origname
,
1240 props
, version
, B_TRUE
, errbuf
)) == NULL
) {
1242 } else if (zcmd_write_src_nvlist(hdl
, &zc
, props
) != 0) {
1248 (void) strlcpy(zc
.zc_name
, thename
, sizeof (zc
.zc_name
));
1250 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_GUID
,
1253 if (zcmd_write_conf_nvlist(hdl
, &zc
, config
) != 0) {
1258 zc
.zc_cookie
= (uint64_t)importfaulted
;
1260 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_IMPORT
, &zc
) != 0) {
1262 if (newname
== NULL
)
1263 (void) snprintf(desc
, sizeof (desc
),
1264 dgettext(TEXT_DOMAIN
, "cannot import '%s'"),
1267 (void) snprintf(desc
, sizeof (desc
),
1268 dgettext(TEXT_DOMAIN
, "cannot import '%s' as '%s'"),
1274 * Unsupported version.
1276 (void) zfs_error(hdl
, EZFS_BADVERSION
, desc
);
1280 (void) zfs_error(hdl
, EZFS_INVALCONFIG
, desc
);
1284 (void) zpool_standard_error(hdl
, errno
, desc
);
1289 zpool_handle_t
*zhp
;
1292 * This should never fail, but play it safe anyway.
1294 if (zpool_open_silent(hdl
, thename
, &zhp
) != 0) {
1296 } else if (zhp
!= NULL
) {
1297 ret
= zpool_create_zvol_links(zhp
);
1303 zcmd_free_nvlists(&zc
);
1313 zpool_scrub(zpool_handle_t
*zhp
, pool_scrub_type_t type
)
1315 zfs_cmd_t zc
= { 0 };
1317 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1319 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1320 zc
.zc_cookie
= type
;
1322 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_POOL_SCRUB
, &zc
) == 0)
1325 (void) snprintf(msg
, sizeof (msg
),
1326 dgettext(TEXT_DOMAIN
, "cannot scrub %s"), zc
.zc_name
);
1329 return (zfs_error(hdl
, EZFS_RESILVERING
, msg
));
1331 return (zpool_standard_error(hdl
, errno
, msg
));
1335 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1336 * spare; but FALSE if its an INUSE spare.
1339 vdev_to_nvlist_iter(nvlist_t
*nv
, const char *search
, uint64_t guid
,
1340 boolean_t
*avail_spare
, boolean_t
*l2cache
, boolean_t
*log
)
1344 uint64_t theguid
, present
;
1346 uint64_t wholedisk
= 0;
1350 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
, &theguid
) == 0);
1352 if (search
== NULL
&&
1353 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_NOT_PRESENT
, &present
) == 0) {
1355 * If the device has never been present since import, the only
1356 * reliable way to match the vdev is by GUID.
1358 if (theguid
== guid
)
1360 } else if (search
!= NULL
&&
1361 nvlist_lookup_string(nv
, ZPOOL_CONFIG_PATH
, &path
) == 0) {
1362 (void) nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_WHOLE_DISK
,
1366 * For whole disks, the internal path has 's0', but the
1367 * path passed in by the user doesn't.
1369 if (strlen(search
) == strlen(path
) - 2 &&
1370 strncmp(search
, path
, strlen(search
)) == 0)
1372 } else if (strcmp(search
, path
) == 0) {
1377 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_CHILDREN
,
1378 &child
, &children
) != 0)
1381 for (c
= 0; c
< children
; c
++) {
1382 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
, guid
,
1383 avail_spare
, l2cache
, NULL
)) != NULL
) {
1385 * The 'is_log' value is only set for the toplevel
1386 * vdev, not the leaf vdevs. So we always lookup the
1387 * log device from the root of the vdev tree (where
1388 * 'log' is non-NULL).
1391 nvlist_lookup_uint64(child
[c
],
1392 ZPOOL_CONFIG_IS_LOG
, &is_log
) == 0 &&
1400 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_SPARES
,
1401 &child
, &children
) == 0) {
1402 for (c
= 0; c
< children
; c
++) {
1403 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
, guid
,
1404 avail_spare
, l2cache
, NULL
)) != NULL
) {
1405 *avail_spare
= B_TRUE
;
1411 if (nvlist_lookup_nvlist_array(nv
, ZPOOL_CONFIG_L2CACHE
,
1412 &child
, &children
) == 0) {
1413 for (c
= 0; c
< children
; c
++) {
1414 if ((ret
= vdev_to_nvlist_iter(child
[c
], search
, guid
,
1415 avail_spare
, l2cache
, NULL
)) != NULL
) {
1426 zpool_find_vdev(zpool_handle_t
*zhp
, const char *path
, boolean_t
*avail_spare
,
1427 boolean_t
*l2cache
, boolean_t
*log
)
1429 char buf
[MAXPATHLEN
];
1435 guid
= strtoull(path
, &end
, 10);
1436 if (guid
!= 0 && *end
== '\0') {
1438 } else if (path
[0] != '/') {
1439 (void) snprintf(buf
, sizeof (buf
), "%s%s", "/dev/dsk/", path
);
1445 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
1448 *avail_spare
= B_FALSE
;
1452 return (vdev_to_nvlist_iter(nvroot
, search
, guid
, avail_spare
,
1457 vdev_online(nvlist_t
*nv
)
1461 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_OFFLINE
, &ival
) == 0 ||
1462 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_FAULTED
, &ival
) == 0 ||
1463 nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_REMOVED
, &ival
) == 0)
1470 * Get phys_path for a root pool
1471 * Return 0 on success; non-zeron on failure.
1474 zpool_get_physpath(zpool_handle_t
*zhp
, char *physpath
)
1476 nvlist_t
*vdev_root
;
1482 * Make sure this is a root pool, as phys_path doesn't mean
1483 * anything to a non-root pool.
1485 if (!pool_is_bootable(zhp
))
1488 verify(nvlist_lookup_nvlist(zhp
->zpool_config
,
1489 ZPOOL_CONFIG_VDEV_TREE
, &vdev_root
) == 0);
1491 if (nvlist_lookup_nvlist_array(vdev_root
, ZPOOL_CONFIG_CHILDREN
,
1492 &child
, &count
) != 0)
1495 for (i
= 0; i
< count
; i
++) {
1502 if (nvlist_lookup_string(child
[i
], ZPOOL_CONFIG_TYPE
, &type
)
1506 if (strcmp(type
, VDEV_TYPE_DISK
) == 0) {
1507 if (!vdev_online(child
[i
]))
1509 verify(nvlist_lookup_string(child
[i
],
1510 ZPOOL_CONFIG_PHYS_PATH
, &tmppath
) == 0);
1511 (void) strncpy(physpath
, tmppath
, strlen(tmppath
));
1512 } else if (strcmp(type
, VDEV_TYPE_MIRROR
) == 0) {
1513 if (nvlist_lookup_nvlist_array(child
[i
],
1514 ZPOOL_CONFIG_CHILDREN
, &child2
, &count2
) != 0)
1517 for (j
= 0; j
< count2
; j
++) {
1518 if (!vdev_online(child2
[j
]))
1520 if (nvlist_lookup_string(child2
[j
],
1521 ZPOOL_CONFIG_PHYS_PATH
, &tmppath
) != 0)
1524 if ((strlen(physpath
) + strlen(tmppath
)) >
1528 if (strlen(physpath
) == 0) {
1529 (void) strncpy(physpath
, tmppath
,
1532 (void) strcat(physpath
, " ");
1533 (void) strcat(physpath
, tmppath
);
1545 * Returns TRUE if the given guid corresponds to the given type.
1546 * This is used to check for hot spares (INUSE or not), and level 2 cache
1550 is_guid_type(zpool_handle_t
*zhp
, uint64_t guid
, const char *type
)
1552 uint64_t target_guid
;
1558 verify(nvlist_lookup_nvlist(zhp
->zpool_config
, ZPOOL_CONFIG_VDEV_TREE
,
1560 if (nvlist_lookup_nvlist_array(nvroot
, type
, &list
, &count
) == 0) {
1561 for (i
= 0; i
< count
; i
++) {
1562 verify(nvlist_lookup_uint64(list
[i
], ZPOOL_CONFIG_GUID
,
1563 &target_guid
) == 0);
1564 if (guid
== target_guid
)
1573 * Bring the specified vdev online. The 'flags' parameter is a set of the
1574 * ZFS_ONLINE_* flags.
1577 zpool_vdev_online(zpool_handle_t
*zhp
, const char *path
, int flags
,
1578 vdev_state_t
*newstate
)
1580 zfs_cmd_t zc
= { 0 };
1583 boolean_t avail_spare
, l2cache
;
1584 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1586 (void) snprintf(msg
, sizeof (msg
),
1587 dgettext(TEXT_DOMAIN
, "cannot online %s"), path
);
1589 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1590 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
1592 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1594 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1597 is_guid_type(zhp
, zc
.zc_guid
, ZPOOL_CONFIG_SPARES
) == B_TRUE
)
1598 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1600 zc
.zc_cookie
= VDEV_STATE_ONLINE
;
1603 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) != 0)
1604 return (zpool_standard_error(hdl
, errno
, msg
));
1606 *newstate
= zc
.zc_cookie
;
1611 * Take the specified vdev offline
1614 zpool_vdev_offline(zpool_handle_t
*zhp
, const char *path
, boolean_t istmp
)
1616 zfs_cmd_t zc
= { 0 };
1619 boolean_t avail_spare
, l2cache
;
1620 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1622 (void) snprintf(msg
, sizeof (msg
),
1623 dgettext(TEXT_DOMAIN
, "cannot offline %s"), path
);
1625 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1626 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
1628 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1630 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1633 is_guid_type(zhp
, zc
.zc_guid
, ZPOOL_CONFIG_SPARES
) == B_TRUE
)
1634 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1636 zc
.zc_cookie
= VDEV_STATE_OFFLINE
;
1637 zc
.zc_obj
= istmp
? ZFS_OFFLINE_TEMPORARY
: 0;
1639 if (zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
1646 * There are no other replicas of this device.
1648 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
1651 return (zpool_standard_error(hdl
, errno
, msg
));
1656 * Mark the given vdev faulted.
1659 zpool_vdev_fault(zpool_handle_t
*zhp
, uint64_t guid
)
1661 zfs_cmd_t zc
= { 0 };
1663 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1665 (void) snprintf(msg
, sizeof (msg
),
1666 dgettext(TEXT_DOMAIN
, "cannot fault %llu"), guid
);
1668 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1670 zc
.zc_cookie
= VDEV_STATE_FAULTED
;
1672 if (ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
1679 * There are no other replicas of this device.
1681 return (zfs_error(hdl
, EZFS_NOREPLICAS
, msg
));
1684 return (zpool_standard_error(hdl
, errno
, msg
));
1690 * Mark the given vdev degraded.
1693 zpool_vdev_degrade(zpool_handle_t
*zhp
, uint64_t guid
)
1695 zfs_cmd_t zc
= { 0 };
1697 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1699 (void) snprintf(msg
, sizeof (msg
),
1700 dgettext(TEXT_DOMAIN
, "cannot degrade %llu"), guid
);
1702 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1704 zc
.zc_cookie
= VDEV_STATE_DEGRADED
;
1706 if (ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_VDEV_SET_STATE
, &zc
) == 0)
1709 return (zpool_standard_error(hdl
, errno
, msg
));
1713 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1717 is_replacing_spare(nvlist_t
*search
, nvlist_t
*tgt
, int which
)
1723 if (nvlist_lookup_nvlist_array(search
, ZPOOL_CONFIG_CHILDREN
, &child
,
1725 verify(nvlist_lookup_string(search
, ZPOOL_CONFIG_TYPE
,
1728 if (strcmp(type
, VDEV_TYPE_SPARE
) == 0 &&
1729 children
== 2 && child
[which
] == tgt
)
1732 for (c
= 0; c
< children
; c
++)
1733 if (is_replacing_spare(child
[c
], tgt
, which
))
1741 * Attach new_disk (fully described by nvroot) to old_disk.
1742 * If 'replacing' is specified, the new disk will replace the old one.
1745 zpool_vdev_attach(zpool_handle_t
*zhp
,
1746 const char *old_disk
, const char *new_disk
, nvlist_t
*nvroot
, int replacing
)
1748 zfs_cmd_t zc
= { 0 };
1752 boolean_t avail_spare
, l2cache
, islog
;
1754 char *path
, *newname
;
1757 nvlist_t
*config_root
;
1758 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1759 boolean_t rootpool
= pool_is_bootable(zhp
);
1762 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1763 "cannot replace %s with %s"), old_disk
, new_disk
);
1765 (void) snprintf(msg
, sizeof (msg
), dgettext(TEXT_DOMAIN
,
1766 "cannot attach %s to %s"), new_disk
, old_disk
);
1769 * If this is a root pool, make sure that we're not attaching an
1770 * EFI labeled device.
1772 if (rootpool
&& pool_uses_efi(nvroot
)) {
1773 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1774 "EFI labeled devices are not supported on root pools."));
1775 return (zfs_error(hdl
, EZFS_POOL_NOTSUP
, msg
));
1778 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1779 if ((tgt
= zpool_find_vdev(zhp
, old_disk
, &avail_spare
, &l2cache
,
1781 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1784 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1787 return (zfs_error(hdl
, EZFS_ISL2CACHE
, msg
));
1789 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1790 zc
.zc_cookie
= replacing
;
1792 if (nvlist_lookup_nvlist_array(nvroot
, ZPOOL_CONFIG_CHILDREN
,
1793 &child
, &children
) != 0 || children
!= 1) {
1794 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1795 "new device must be a single disk"));
1796 return (zfs_error(hdl
, EZFS_INVALCONFIG
, msg
));
1799 verify(nvlist_lookup_nvlist(zpool_get_config(zhp
, NULL
),
1800 ZPOOL_CONFIG_VDEV_TREE
, &config_root
) == 0);
1802 if ((newname
= zpool_vdev_name(NULL
, NULL
, child
[0])) == NULL
)
1806 * If the target is a hot spare that has been swapped in, we can only
1807 * replace it with another hot spare.
1810 nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_IS_SPARE
, &val
) == 0 &&
1811 (zpool_find_vdev(zhp
, newname
, &avail_spare
, &l2cache
,
1812 NULL
) == NULL
|| !avail_spare
) &&
1813 is_replacing_spare(config_root
, tgt
, 1)) {
1814 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1815 "can only be replaced by another hot spare"));
1817 return (zfs_error(hdl
, EZFS_BADTARGET
, msg
));
1821 * If we are attempting to replace a spare, it canot be applied to an
1822 * already spared device.
1825 nvlist_lookup_string(child
[0], ZPOOL_CONFIG_PATH
, &path
) == 0 &&
1826 zpool_find_vdev(zhp
, newname
, &avail_spare
,
1827 &l2cache
, NULL
) != NULL
&& avail_spare
&&
1828 is_replacing_spare(config_root
, tgt
, 0)) {
1829 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1830 "device has already been replaced with a spare"));
1832 return (zfs_error(hdl
, EZFS_BADTARGET
, msg
));
1837 if (zcmd_write_conf_nvlist(hdl
, &zc
, nvroot
) != 0)
1840 ret
= zfs_ioctl(zhp
->zpool_hdl
, ZFS_IOC_VDEV_ATTACH
, &zc
);
1842 zcmd_free_nvlists(&zc
);
1847 * XXX - This should be removed once we can
1848 * automatically install the bootblocks on the
1849 * newly attached disk.
1851 (void) fprintf(stderr
, dgettext(TEXT_DOMAIN
, "Please "
1852 "be sure to invoke %s to make '%s' bootable.\n"),
1861 * Can't attach to or replace this type of vdev.
1865 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1866 "cannot replace a log with a spare"));
1868 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1869 "cannot replace a replacing device"));
1871 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1872 "can only attach to mirrors and top-level "
1875 (void) zfs_error(hdl
, EZFS_BADTARGET
, msg
);
1880 * The new device must be a single disk.
1882 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1883 "new device must be a single disk"));
1884 (void) zfs_error(hdl
, EZFS_INVALCONFIG
, msg
);
1888 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "%s is busy"),
1890 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1895 * The new device is too small.
1897 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1898 "device is too small"));
1899 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1904 * The new device has a different alignment requirement.
1906 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
1907 "devices have different sector alignment"));
1908 (void) zfs_error(hdl
, EZFS_BADDEV
, msg
);
1913 * The resulting top-level vdev spec won't fit in the label.
1915 (void) zfs_error(hdl
, EZFS_DEVOVERFLOW
, msg
);
1919 (void) zpool_standard_error(hdl
, errno
, msg
);
1926 * Detach the specified device.
1929 zpool_vdev_detach(zpool_handle_t
*zhp
, const char *path
)
1931 zfs_cmd_t zc
= { 0 };
1934 boolean_t avail_spare
, l2cache
;
1935 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1937 (void) snprintf(msg
, sizeof (msg
),
1938 dgettext(TEXT_DOMAIN
, "cannot detach %s"), path
);
1940 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1941 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
1943 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
1946 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
1949 return (zfs_error(hdl
, EZFS_ISL2CACHE
, msg
));
1951 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
1953 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_DETACH
, &zc
) == 0)
1960 * Can't detach from this type of vdev.
1962 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "only "
1963 "applicable to mirror and replacing vdevs"));
1964 (void) zfs_error(zhp
->zpool_hdl
, EZFS_BADTARGET
, msg
);
1969 * There are no other replicas of this device.
1971 (void) zfs_error(hdl
, EZFS_NOREPLICAS
, msg
);
1975 (void) zpool_standard_error(hdl
, errno
, msg
);
1982 * Remove the given device. Currently, this is supported only for hot spares
1983 * and level 2 cache devices.
1986 zpool_vdev_remove(zpool_handle_t
*zhp
, const char *path
)
1988 zfs_cmd_t zc
= { 0 };
1991 boolean_t avail_spare
, l2cache
;
1992 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
1994 (void) snprintf(msg
, sizeof (msg
),
1995 dgettext(TEXT_DOMAIN
, "cannot remove %s"), path
);
1997 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
1998 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
, &l2cache
,
2000 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2002 if (!avail_spare
&& !l2cache
) {
2003 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
,
2004 "only inactive hot spares or cache devices "
2006 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2009 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &zc
.zc_guid
) == 0);
2011 if (zfs_ioctl(hdl
, ZFS_IOC_VDEV_REMOVE
, &zc
) == 0)
2014 return (zpool_standard_error(hdl
, errno
, msg
));
2018 * Clear the errors for the pool, or the particular device if specified.
2021 zpool_clear(zpool_handle_t
*zhp
, const char *path
)
2023 zfs_cmd_t zc
= { 0 };
2026 boolean_t avail_spare
, l2cache
;
2027 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2030 (void) snprintf(msg
, sizeof (msg
),
2031 dgettext(TEXT_DOMAIN
, "cannot clear errors for %s"),
2034 (void) snprintf(msg
, sizeof (msg
),
2035 dgettext(TEXT_DOMAIN
, "cannot clear errors for %s"),
2038 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2040 if ((tgt
= zpool_find_vdev(zhp
, path
, &avail_spare
,
2041 &l2cache
, NULL
)) == 0)
2042 return (zfs_error(hdl
, EZFS_NODEVICE
, msg
));
2045 * Don't allow error clearing for hot spares. Do allow
2046 * error clearing for l2cache devices.
2049 return (zfs_error(hdl
, EZFS_ISSPARE
, msg
));
2051 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
,
2055 if (zfs_ioctl(hdl
, ZFS_IOC_CLEAR
, &zc
) == 0)
2058 return (zpool_standard_error(hdl
, errno
, msg
));
2062 * Similar to zpool_clear(), but takes a GUID (used by fmd).
2065 zpool_vdev_clear(zpool_handle_t
*zhp
, uint64_t guid
)
2067 zfs_cmd_t zc
= { 0 };
2069 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2071 (void) snprintf(msg
, sizeof (msg
),
2072 dgettext(TEXT_DOMAIN
, "cannot clear errors for %llx"),
2075 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2078 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_CLEAR
, &zc
) == 0)
2081 return (zpool_standard_error(hdl
, errno
, msg
));
2085 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
2089 zpool_iter_zvol(zpool_handle_t
*zhp
, int (*cb
)(const char *, void *),
2092 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2093 char (*paths
)[MAXPATHLEN
];
2095 int curr
, base
, ret
= 0;
2103 if ((base
= open("/dev/zvol/dsk", O_RDONLY
)) < 0)
2104 return (errno
== ENOENT
? 0 : -1);
2106 if (fstatat(base
, zhp
->zpool_name
, &st
, 0) != 0) {
2109 return (err
== ENOENT
? 0 : -1);
2113 * Oddly this wasn't a directory -- ignore that failure since we
2114 * know there are no links lower in the (non-existant) hierarchy.
2116 if (!S_ISDIR(st
.st_mode
)) {
2121 if ((paths
= zfs_alloc(hdl
, size
* sizeof (paths
[0]))) == NULL
) {
2126 (void) strlcpy(paths
[0], zhp
->zpool_name
, sizeof (paths
[0]));
2131 if (fstatat(base
, paths
[curr
], &st
, AT_SYMLINK_NOFOLLOW
) != 0)
2134 if (S_ISDIR(st
.st_mode
)) {
2135 if ((fd
= openat(base
, paths
[curr
], O_RDONLY
)) < 0)
2138 if ((dirp
= fdopendir(fd
)) == NULL
) {
2143 while ((dp
= readdir(dirp
)) != NULL
) {
2144 if (dp
->d_name
[0] == '.')
2147 if (curr
+ 1 == size
) {
2148 paths
= zfs_realloc(hdl
, paths
,
2149 size
* sizeof (paths
[0]),
2150 size
* 2 * sizeof (paths
[0]));
2151 if (paths
== NULL
) {
2152 (void) closedir(dirp
);
2160 (void) strlcpy(paths
[curr
+ 1], paths
[curr
],
2161 sizeof (paths
[curr
+ 1]));
2162 (void) strlcat(paths
[curr
], "/",
2163 sizeof (paths
[curr
]));
2164 (void) strlcat(paths
[curr
], dp
->d_name
,
2165 sizeof (paths
[curr
]));
2169 (void) closedir(dirp
);
2172 if ((ret
= cb(paths
[curr
], data
)) != 0)
2178 #endif /* PORT_NETBSD */
2191 typedef struct zvol_cb
{
2192 zpool_handle_t
*zcb_pool
;
2193 boolean_t zcb_create
;
2198 do_zvol_create(zfs_handle_t
*zhp
, void *data
)
2202 if (ZFS_IS_VOLUME(zhp
)) {
2203 (void) zvol_create_link(zhp
->zfs_hdl
, zhp
->zfs_name
);
2204 ret
= zfs_iter_snapshots(zhp
, do_zvol_create
, NULL
);
2208 ret
= zfs_iter_filesystems(zhp
, do_zvol_create
, NULL
);
2216 * Iterate over all zvols in the pool and make any necessary minor nodes.
2219 zpool_create_zvol_links(zpool_handle_t
*zhp
)
2225 * If the pool is unavailable, just return success.
2227 if ((zfp
= make_dataset_handle(zhp
->zpool_hdl
,
2228 zhp
->zpool_name
)) == NULL
)
2231 ret
= zfs_iter_filesystems(zfp
, do_zvol_create
, NULL
);
2238 do_zvol_remove(const char *dataset
, void *data
)
2240 zpool_handle_t
*zhp
= data
;
2242 return (zvol_remove_link(zhp
->zpool_hdl
, dataset
));
2246 * Iterate over all zvols in the pool and remove any minor nodes. We iterate
2247 * by examining the /dev links so that a corrupted pool doesn't impede this
2251 zpool_remove_zvol_links(zpool_handle_t
*zhp
)
2253 return (zpool_iter_zvol(zhp
, do_zvol_remove
, zhp
));
2257 * Convert from a devid string to a path.
2260 devid_to_path(char *devid_str
)
2265 devid_nmlist_t
*list
= NULL
;
2268 if (devid_str_decode(devid_str
, &devid
, &minor
) != 0)
2271 ret
= devid_deviceid_to_nmlist("/dev", devid
, minor
, &list
);
2273 devid_str_free(minor
);
2279 if ((path
= strdup(list
[0].devname
)) == NULL
)
2282 devid_free_nmlist(list
);
2288 * Convert from a path to a devid string.
2291 path_to_devid(const char *path
)
2297 if ((fd
= open(path
, O_RDONLY
)) < 0)
2302 if (devid_get(fd
, &devid
) == 0) {
2303 if (devid_get_minor_name(fd
, &minor
) == 0)
2304 ret
= devid_str_encode(devid
, minor
);
2306 devid_str_free(minor
);
2315 * Issue the necessary ioctl() to update the stored path value for the vdev. We
2316 * ignore any failure here, since a common case is for an unprivileged user to
2317 * type 'zpool status', and we'll display the correct information anyway.
2320 set_path(zpool_handle_t
*zhp
, nvlist_t
*nv
, const char *path
)
2322 zfs_cmd_t zc
= { 0 };
2324 (void) strncpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2325 (void) strncpy(zc
.zc_value
, path
, sizeof (zc
.zc_value
));
2326 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
,
2329 (void) ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_VDEV_SETPATH
, &zc
);
2333 * Given a vdev, return the name to display in iostat. If the vdev has a path,
2334 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2335 * We also check if this is a whole disk, in which case we strip off the
2336 * trailing 's0' slice name.
2338 * This routine is also responsible for identifying when disks have been
2339 * reconfigured in a new location. The kernel will have opened the device by
2340 * devid, but the path will still refer to the old location. To catch this, we
2341 * first do a path -> devid translation (which is fast for the common case). If
2342 * the devid matches, we're done. If not, we do a reverse devid -> path
2343 * translation and issue the appropriate ioctl() to update the path of the vdev.
2344 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2348 zpool_vdev_name(libzfs_handle_t
*hdl
, zpool_handle_t
*zhp
, nvlist_t
*nv
)
2356 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_NOT_PRESENT
,
2358 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_GUID
,
2360 (void) snprintf(buf
, sizeof (buf
), "%llu",
2361 (u_longlong_t
)value
);
2363 } else if (nvlist_lookup_string(nv
, ZPOOL_CONFIG_PATH
, &path
) == 0) {
2366 * If the device is dead (faulted, offline, etc) then don't
2367 * bother opening it. Otherwise we may be forcing the user to
2368 * open a misbehaving device, which can have undesirable
2371 if ((nvlist_lookup_uint64_array(nv
, ZPOOL_CONFIG_STATS
,
2372 (uint64_t **)&vs
, &vsc
) != 0 ||
2373 vs
->vs_state
>= VDEV_STATE_DEGRADED
) &&
2375 nvlist_lookup_string(nv
, ZPOOL_CONFIG_DEVID
, &devid
) == 0) {
2377 * Determine if the current path is correct.
2379 char *newdevid
= path_to_devid(path
);
2381 if (newdevid
== NULL
||
2382 strcmp(devid
, newdevid
) != 0) {
2385 if ((newpath
= devid_to_path(devid
)) != NULL
) {
2387 * Update the path appropriately.
2389 set_path(zhp
, nv
, newpath
);
2390 if (nvlist_add_string(nv
,
2391 ZPOOL_CONFIG_PATH
, newpath
) == 0)
2392 verify(nvlist_lookup_string(nv
,
2400 devid_str_free(newdevid
);
2403 if (strncmp(path
, "/dev/dsk/", 9) == 0)
2406 if (nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_WHOLE_DISK
,
2407 &value
) == 0 && value
) {
2408 char *tmp
= zfs_strdup(hdl
, path
);
2411 tmp
[strlen(path
) - 2] = '\0';
2415 verify(nvlist_lookup_string(nv
, ZPOOL_CONFIG_TYPE
, &path
) == 0);
2418 * If it's a raidz device, we need to stick in the parity level.
2420 if (strcmp(path
, VDEV_TYPE_RAIDZ
) == 0) {
2421 verify(nvlist_lookup_uint64(nv
, ZPOOL_CONFIG_NPARITY
,
2423 (void) snprintf(buf
, sizeof (buf
), "%s%llu", path
,
2424 (u_longlong_t
)value
);
2429 return (zfs_strdup(hdl
, path
));
2433 zbookmark_compare(const void *a
, const void *b
)
2435 return (memcmp(a
, b
, sizeof (zbookmark_t
)));
2439 * Retrieve the persistent error log, uniquify the members, and return to the
2443 zpool_get_errlog(zpool_handle_t
*zhp
, nvlist_t
**nverrlistp
)
2445 zfs_cmd_t zc
= { 0 };
2447 zbookmark_t
*zb
= NULL
;
2451 * Retrieve the raw error list from the kernel. If the number of errors
2452 * has increased, allocate more space and continue until we get the
2455 verify(nvlist_lookup_uint64(zhp
->zpool_config
, ZPOOL_CONFIG_ERRCOUNT
,
2459 if ((zc
.zc_nvlist_dst
= (uintptr_t)zfs_alloc(zhp
->zpool_hdl
,
2460 count
* sizeof (zbookmark_t
))) == (uintptr_t)NULL
)
2462 zc
.zc_nvlist_dst_size
= count
;
2463 (void) strcpy(zc
.zc_name
, zhp
->zpool_name
);
2465 if (ioctl(zhp
->zpool_hdl
->libzfs_fd
, ZFS_IOC_ERROR_LOG
,
2467 free((void *)(uintptr_t)zc
.zc_nvlist_dst
);
2468 if (errno
== ENOMEM
) {
2469 count
= zc
.zc_nvlist_dst_size
;
2470 if ((zc
.zc_nvlist_dst
= (uintptr_t)
2471 zfs_alloc(zhp
->zpool_hdl
, count
*
2472 sizeof (zbookmark_t
))) == (uintptr_t)NULL
)
2483 * Sort the resulting bookmarks. This is a little confusing due to the
2484 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
2485 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
2486 * _not_ copied as part of the process. So we point the start of our
2487 * array appropriate and decrement the total number of elements.
2489 zb
= ((zbookmark_t
*)(uintptr_t)zc
.zc_nvlist_dst
) +
2490 zc
.zc_nvlist_dst_size
;
2491 count
-= zc
.zc_nvlist_dst_size
;
2493 qsort(zb
, count
, sizeof (zbookmark_t
), zbookmark_compare
);
2495 verify(nvlist_alloc(nverrlistp
, 0, KM_SLEEP
) == 0);
2498 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2500 for (i
= 0; i
< count
; i
++) {
2503 /* ignoring zb_blkid and zb_level for now */
2504 if (i
> 0 && zb
[i
-1].zb_objset
== zb
[i
].zb_objset
&&
2505 zb
[i
-1].zb_object
== zb
[i
].zb_object
)
2508 if (nvlist_alloc(&nv
, NV_UNIQUE_NAME
, KM_SLEEP
) != 0)
2510 if (nvlist_add_uint64(nv
, ZPOOL_ERR_DATASET
,
2511 zb
[i
].zb_objset
) != 0) {
2515 if (nvlist_add_uint64(nv
, ZPOOL_ERR_OBJECT
,
2516 zb
[i
].zb_object
) != 0) {
2520 if (nvlist_add_nvlist(*nverrlistp
, "ejk", nv
) != 0) {
2527 free((void *)(uintptr_t)zc
.zc_nvlist_dst
);
2531 free((void *)(uintptr_t)zc
.zc_nvlist_dst
);
2532 return (no_memory(zhp
->zpool_hdl
));
2536 * Upgrade a ZFS pool to the latest on-disk version.
2539 zpool_upgrade(zpool_handle_t
*zhp
, uint64_t new_version
)
2541 zfs_cmd_t zc
= { 0 };
2542 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2544 (void) strcpy(zc
.zc_name
, zhp
->zpool_name
);
2545 zc
.zc_cookie
= new_version
;
2547 if (zfs_ioctl(hdl
, ZFS_IOC_POOL_UPGRADE
, &zc
) != 0)
2548 return (zpool_standard_error_fmt(hdl
, errno
,
2549 dgettext(TEXT_DOMAIN
, "cannot upgrade '%s'"),
2555 zpool_set_history_str(const char *subcommand
, int argc
, char **argv
,
2560 (void) strlcpy(history_str
, subcommand
, HIS_MAX_RECORD_LEN
);
2561 for (i
= 1; i
< argc
; i
++) {
2562 if (strlen(history_str
) + 1 + strlen(argv
[i
]) >
2565 (void) strlcat(history_str
, " ", HIS_MAX_RECORD_LEN
);
2566 (void) strlcat(history_str
, argv
[i
], HIS_MAX_RECORD_LEN
);
2571 * Stage command history for logging.
2574 zpool_stage_history(libzfs_handle_t
*hdl
, const char *history_str
)
2576 if (history_str
== NULL
)
2579 if (strlen(history_str
) > HIS_MAX_RECORD_LEN
)
2582 if (hdl
->libzfs_log_str
!= NULL
)
2583 free(hdl
->libzfs_log_str
);
2585 if ((hdl
->libzfs_log_str
= strdup(history_str
)) == NULL
)
2586 return (no_memory(hdl
));
2592 * Perform ioctl to get some command history of a pool.
2594 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
2595 * logical offset of the history buffer to start reading from.
2597 * Upon return, 'off' is the next logical offset to read from and
2598 * 'len' is the actual amount of bytes read into 'buf'.
2601 get_history(zpool_handle_t
*zhp
, char *buf
, uint64_t *off
, uint64_t *len
)
2603 zfs_cmd_t zc
= { 0 };
2604 libzfs_handle_t
*hdl
= zhp
->zpool_hdl
;
2606 (void) strlcpy(zc
.zc_name
, zhp
->zpool_name
, sizeof (zc
.zc_name
));
2608 zc
.zc_history
= (uint64_t)(uintptr_t)buf
;
2609 zc
.zc_history_len
= *len
;
2610 zc
.zc_history_offset
= *off
;
2612 if (ioctl(hdl
->libzfs_fd
, ZFS_IOC_POOL_GET_HISTORY
, &zc
) != 0) {
2615 return (zfs_error_fmt(hdl
, EZFS_PERM
,
2616 dgettext(TEXT_DOMAIN
,
2617 "cannot show history for pool '%s'"),
2620 return (zfs_error_fmt(hdl
, EZFS_NOHISTORY
,
2621 dgettext(TEXT_DOMAIN
, "cannot get history for pool "
2622 "'%s'"), zhp
->zpool_name
));
2624 return (zfs_error_fmt(hdl
, EZFS_BADVERSION
,
2625 dgettext(TEXT_DOMAIN
, "cannot get history for pool "
2626 "'%s', pool must be upgraded"), zhp
->zpool_name
));
2628 return (zpool_standard_error_fmt(hdl
, errno
,
2629 dgettext(TEXT_DOMAIN
,
2630 "cannot get history for '%s'"), zhp
->zpool_name
));
2634 *len
= zc
.zc_history_len
;
2635 *off
= zc
.zc_history_offset
;
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array, growing it geometrically */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));

			if (tmp == NULL) {
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
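/*
 * Example (illustrative, hypothetical helper): on the wire, each history
 * record is a 64-bit little-endian length followed by that many bytes of
 * packed nvlist, which is exactly the framing zpool_history_unpack()
 * walks above.  This sketch encodes one record the same way; the
 * NV_ENCODE_NATIVE choice is illustrative, since nvlist_unpack()
 * detects the encoding itself.
 */
static int
example_pack_record(nvlist_t *nv, char *buf, size_t buflen, size_t *outlen)
{
	char *packed = NULL;
	size_t reclen = 0;
	int i;

	if (nvlist_pack(nv, &packed, &reclen, NV_ENCODE_NATIVE, 0) != 0)
		return (-1);
	if (sizeof (uint64_t) + reclen > buflen) {
		free(packed);
		return (-1);
	}
	/* store the length prefix byte-by-byte, little endian */
	for (i = 0; i < sizeof (uint64_t); i++)
		((uchar_t *)buf)[i] = ((uint64_t)reclen >> (8 * i)) & 0xff;
	(void) memcpy(buf + sizeof (uint64_t), packed, reclen);
	free(packed);
	*outlen = sizeof (uint64_t) + reclen;
	return (0);
}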
#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
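/*
 * Usage sketch (illustrative, hypothetical helper): consuming the history
 * retrieved above.  Each record is an nvlist; ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD are the fields logged for command history.  Error
 * handling is deliberately minimal.
 */
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis;
	nvlist_t **records;
	uint_t numrecords, i;
	uint64_t when;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;
	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
		    &when) == 0 &&
		    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%llu %s\n", (u_longlong_t)when, cmd);
	}
	nvlist_free(nvhis);
}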
/*
 * Convert a (dataset object, object) pair, as reported for a corrupted
 * block, into a human-readable path, falling back to raw object numbers
 * when the names can't be resolved.
 */
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    dsobj, obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
	}
	free(mntpnt);
}
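/*
 * Example (illustrative, hypothetical helper; the object numbers are
 * made up): translating the (dsobj, obj) pairs the kernel reports for
 * corrupted blocks.
 */
static void
example_obj_to_path(zpool_handle_t *zhp)
{
	char pathname[MAXPATHLEN];

	/* dsobj 0 is the MOS special case: "<metadata>:<0x1d>" */
	zpool_obj_to_path(zhp, 0, 0x1d, pathname, sizeof (pathname));

	/* a resolvable object in a mounted dataset yields a file path */
	zpool_obj_to_path(zhp, 0x15, 0x1d, pathname, sizeof (pathname));
}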
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256
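/*
 * Worked example of the choice above: with 512-byte sectors,
 * 256 blocks * 512 bytes = 128K, so a slice starting at block 256 is
 * aligned with a 128K stripe, while the EFI default of block 34 is not.
 */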
/*
 * Read the EFI label from the device named in the config; if a label
 * does not exist, pass the error back to the caller.  If the caller has
 * passed a non-NULL diskaddr argument, set it to the starting address
 * of the EFI partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}
/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"), name);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and provide a
		 * specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
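/*
 * Usage sketch (illustrative, hypothetical helper; "c1t0d0" is a made-up
 * short disk name): after a successful zpool_label_disk(), slice 0 covers
 * the usable region starting at the aligned block and slice 8 holds the
 * EFI reserve, so the device would subsequently be addressed via its s0
 * slice.  Passing a NULL zhp labels a disk for a brand-new pool, which
 * starts at NEW_START_BLOCK.
 */
static int
example_label_whole_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	return (zpool_label_disk(hdl, zhp, "c1t0d0"));
}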
/*
 * Determine whether the given vdev configuration contains only device
 * types that can back a dump device; raidz, file, log, and missing
 * vdevs cannot.
 */
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}
/*
 * Check if this zvol is allowable for use as a dump device; returns zero
 * if it is, > 0 if it isn't, and < 0 if it isn't a zvol at all.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		return (1);
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		return (1);
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {