/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
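/*
 * Example (sketch): reading a numeric property along with its source,
 * assuming 'zhp' is an open zpool_handle_t:
 *
 *	zprop_source_t src;
 *	uint64_t vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 */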
/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
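/*
 * Example (sketch): fetching the pool health as a printable string,
 * assuming 'zhp' is an open handle:
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    &src, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 */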
/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid '%s=%d' property: only values "
				    "between %" PRId32 " and %" PRId32 " "
				    "are allowed.\n"),
				    propname, intval, ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name
			 * and the dataset has to be in the same pool as the
			 * one the property is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_TNAME:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
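/*
 * Example (sketch): validating a user-supplied property list at create
 * time.  The input nvlist carries string values (e.g. "ashift"="12");
 * the returned list has them parsed into native types, or NULL is
 * returned with the error already reported through 'hdl':
 *
 *	prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
 *	nvlist_t *parsed = zpool_valid_proplist(hdl, "tank", props,
 *	    SPA_VERSION_1, flags, errbuf);
 */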
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
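/*
 * Example (sketch): setting a property from its string form; the value
 * is validated and parsed by zpool_valid_proplist() above:
 *
 *	if (zpool_set_prop(zhp, "comment", "rack 12, primary") != 0)
 *		return (-1);	(error already reported on the handle)
 */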
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
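/*
 * Worked example: with 512B sectors, 2048 blocks * 512B = 1048576B = 1 MiB,
 * so both the starting block and the partition-end alignment above fall on
 * 1m boundaries; larger power-of-2 block sizes remain 1m aligned as well.
 */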
/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
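/*
 * Example (sketch): "raidz1" fails validation at create/import time
 * because the "raidz" prefix is reserved, while an already-open pool
 * with that name is still accepted:
 *
 *	zpool_name_valid(hdl, B_FALSE, "raidz1");	(returns B_FALSE)
 *	zpool_name_valid(hdl, B_TRUE, "raidz1");	(returns B_TRUE)
 */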
/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zpool_close(zhp);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		return (NULL);
	}

	return (zhp);
}
/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
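/*
 * Example (sketch): typical open/use/close cycle, assuming 'hdl' came
 * from libzfs_init():
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
 *		return (-1);
 *	(void) printf("%s\n", zpool_get_name(zhp));
 *	zpool_close(zhp);
 */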
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.  This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
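/*
 * Example (sketch): creating a pool, assuming the caller has already
 * built and validated 'nvroot' (the vdev tree) as the comment above
 * requires; 'props' and 'fsprops' may be NULL:
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		return (-1);	(detailed EZFS_* error already set on 'hdl')
 */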
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
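/*
 * Example (sketch): a plain export versus a hard-forced one, assuming
 * 'history_str' holds the command line to record in the pool history:
 *
 *	(void) zpool_export(zhp, B_FALSE, history_str);
 *	(void) zpool_export_force(zhp, history_str);
 */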
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}
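/*
 * Example (sketch): importing under an alternate root; per the code
 * above this also forces cachefile=none, so the import is temporary:
 *
 *	char altroot[] = "/a";
 *
 *	if (zpool_import(hdl, config, NULL, altroot) != 0)
 *		return (-1);
 */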
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "-part1", or "p1".  The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
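/*
 * Example (sketch): 'path' may be a vdev GUID in string form, an
 * interior vdev name such as "mirror-0", or a device path/short name:
 *
 *	boolean_t spare, cache, log;
 *	nvlist_t *tgt = zpool_find_vdev(zhp, "mirror-0", &spare, &cache,
 *	    &log);
 */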
static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}
2203 vdev_get_one_physpath(nvlist_t
*config
, char *physpath
, size_t physpath_size
,
2204 size_t *bytes_written
)
2206 size_t bytes_left
, pos
, rsz
;
2210 if (nvlist_lookup_string(config
, ZPOOL_CONFIG_PHYS_PATH
,
2212 return (EZFS_NODEVICE
);
2214 pos
= *bytes_written
;
2215 bytes_left
= physpath_size
- pos
;
2216 format
= (pos
== 0) ? "%s" : " %s";
2218 rsz
= snprintf(physpath
+ pos
, bytes_left
, format
, tmppath
);
2219 *bytes_written
+= rsz
;
2221 if (rsz
>= bytes_left
) {
2222 /* if physpath was not copied properly, clear it */
2223 if (bytes_left
!= 0) {
2226 return (EZFS_NOSPC
);
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	} else {
		return (EZFS_POOL_INVALARG);
	}

	return (0);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can only have a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}
2325 * Return 0 on success; non-zero on failure.
2328 zpool_get_physpath(zpool_handle_t
*zhp
, char *physpath
, size_t phypath_size
)
2330 return (zpool_get_config_physpath(zhp
->zpool_config
, physpath
,
2335 * If the device has being dynamically expanded then we need to relabel
2336 * the disk to use the new unallocated space.
2339 zpool_relabel_disk(libzfs_handle_t
*hdl
, const char *path
, const char *msg
)
2343 if ((fd
= open(path
, O_RDWR
|O_DIRECT
)) < 0) {
2344 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "cannot "
2345 "relabel '%s': unable to open device: %d"), path
, errno
);
2346 return (zfs_error(hdl
, EZFS_OPENFAILED
, msg
));
2350 * It's possible that we might encounter an error if the device
2351 * does not have any unallocated space left. If so, we simply
2352 * ignore that error and continue on.
2354 * Also, we don't call efi_rescan() - that would just return EBUSY.
2355 * The module will do it for us in vdev_disk_open().
2357 error
= efi_use_whole_disk(fd
);
2359 /* Flush the buffers to disk and invalidate the page cache. */
2361 (void) ioctl(fd
, BLKFLSBUF
);
2364 if (error
&& error
!= VT_ENOSPC
) {
2365 zfs_error_aux(hdl
, dgettext(TEXT_DOMAIN
, "cannot "
2366 "relabel '%s': unable to read disk capacity"), path
);
2367 return (zfs_error(hdl
, EZFS_NOCAP
, msg
));
2374 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
2376 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
2377 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
2381 zpool_vdev_path_to_guid_impl(zpool_handle_t
*zhp
, const char *path
,
2382 boolean_t
*is_spare
, boolean_t
*is_l2cache
, boolean_t
*is_log
)
2385 boolean_t spare
= B_FALSE
, l2cache
= B_FALSE
, log
= B_FALSE
;
2388 if ((tgt
= zpool_find_vdev(zhp
, path
, &spare
, &l2cache
,
2392 verify(nvlist_lookup_uint64(tgt
, ZPOOL_CONFIG_GUID
, &guid
) == 0);
2393 if (is_spare
!= NULL
)
2395 if (is_l2cache
!= NULL
)
2396 *is_l2cache
= l2cache
;
2403 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
2405 zpool_vdev_path_to_guid(zpool_handle_t
*zhp
, const char *path
)
2407 return (zpool_vdev_path_to_guid_impl(zhp
, path
, NULL
, NULL
, NULL
));
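/*
 * Example (sketch):
 *
 *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda1");
 *	if (guid == 0)
 *		return (-1);	(no vdev matched the path)
 */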
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int error;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];

			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one.  Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}
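/*
 * Example (sketch): onlining a device and checking the resulting state:
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND, &newstate) == 0 &&
 *	    newstate == VDEV_STATE_HEALTHY)
 *		(void) printf("expanded and online\n");
 */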
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
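/*
 * Editor's illustrative sketch, not part of libzfs: zpool(8) normally
 * builds 'nvroot' via make_root_vdev().  A minimal hand-rolled config
 * for replacing one disk might look like this (device paths are
 * hypothetical, error checks omitted):
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/sdb1") == 0);
 *	verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0) == 0);
 *
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	(void) zpool_vdev_attach(zhp, "sda1", "sdb1", nvroot, B_TRUE);
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 */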
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], 0);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], 0);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
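/*
 * Editor's illustrative sketch, not part of libzfs: 'zpool split' first
 * performs a dry run, then repeats the call for real (the pool name is
 * hypothetical):
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *		flags.dryrun = 0;
 *		(void) zpool_vdev_split(zhp, "newpool", &newroot, NULL,
 *		    flags);
 *	}
 *	nvlist_free(newroot);
 */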
/*
 * Remove the given device.  Currently, this is supported only for hot spares,
 * cache, and log devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
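/*
 * Editor's illustrative sketch, not part of libzfs: 'zpool clear' always
 * hands in a rewind-policy nvlist, even for a plain (non-rewind) clear.
 * This assumes the pre-load-policy rewind keys (ZPOOL_REWIND_REQUEST,
 * ZPOOL_NO_REWIND) available in this era of the code:
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *
 *	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	nvlist_free(policy);
 */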
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = {"\0"};

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);
	return (zpool_standard_error(hdl, errno, msg));
}
#if defined(__sun__) || defined(__sun)
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In a case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	uint64_t guid;

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &guid) == 0);
	zc.zc_guid = guid;

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
#endif /* sun */
/*
 * Remove partition suffix from a vdev path.  Partition suffixes may take three
 * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
 * third case only occurs when preceded by a string matching the regular
 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
 *
 * caller must free the returned string
 */
char *
zfs_strip_partition(char *path)
{
	char *tmp = strdup(path);
	char *part = NULL, *d = NULL;

	if (!tmp)
		return (NULL);

	if ((part = strstr(tmp, "-part")) && part != tmp) {
		d = part + 5;
	} else if ((part = strrchr(tmp, 'p')) &&
	    part > tmp + 1 && isdigit(*(part-1))) {
		d = part + 1;
	} else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
	    tmp[1] == 'd') {
		for (d = &tmp[2]; isalpha(*d); part = ++d) { }
	} else if (strncmp("xvd", tmp, 3) == 0) {
		for (d = &tmp[3]; isalpha(*d); part = ++d) { }
	}
	if (part && d && *d != '\0') {
		for (; isdigit(*d); d++) { }
		if (*d == '\0')
			*part = '\0';
	}

	return (tmp);
}
/*
 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
 *
 * path:	/dev/sda1
 * returns:	/dev/sda
 *
 * Returned string must be freed.
 */
char *
zfs_strip_partition_path(char *path)
{
	char *newpath = strdup(path);
	char *sd_offset;
	char *new_sd;

	if (!newpath)
		return (NULL);

	/* Point to "sda1" part of "/dev/sda1" */
	sd_offset = strrchr(newpath, '/') + 1;

	/* Get our new name "sda" */
	new_sd = zfs_strip_partition(sd_offset);
	if (!new_sd) {
		free(newpath);
		return (NULL);
	}

	/* Paste the "sda" where "sda1" was */
	strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);

	/* Free temporary "sda" */
	free(new_sd);

	return (newpath);
}
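/*
 * Editor's illustrative sketch, not part of libzfs: both helpers return
 * newly allocated strings which the caller must free().
 *
 *	char *disk = zfs_strip_partition_path("/dev/sda1");
 *
 *	if (disk != NULL) {
 *		(void) printf("%s\n", disk);
 *		free(disk);
 *	}
 *
 * This prints "/dev/sda"; zfs_strip_partition("sda1") would likewise
 * yield "sda".
 */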
#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
#if defined(__sun__) || defined(__sun)
		/*
		 * Live VDEV path updates to a kernel VDEV during a
		 * zpool_vdev_name lookup are not supported on Linux.
		 */
		char *devid;
		vdev_stat_t *vs;
		uint_t vsc;

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}
#endif /* sun */

		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = strrchr(path, '/');
			path++;
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
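/*
 * Editor's illustrative sketch, not part of libzfs: walking the top-level
 * vdevs of a pool and printing their display names, as status-style
 * tools do (error checks omitted):
 *
 *	nvlist_t *config, *root, **child;
 *	uint_t c, children;
 *
 *	config = zpool_get_config(zhp, NULL);
 *	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 *	    &root) == 0);
 *	verify(nvlist_lookup_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0);
 *	for (c = 0; c < children; c++) {
 *		char *name = zpool_vdev_name(hdl, zhp, child[c],
 *		    VDEV_NAME_TYPE_ID);
 *		(void) printf("%s\n", name);
 *		free(name);
 *	}
 */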
static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t));
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
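/*
 * Editor's illustrative sketch, not part of libzfs: consuming the
 * uniquified error list.  Each element is an nvlist carrying
 * ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT:
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist,
 *		    elem)) != NULL) {
 *			nvlist_t *nv;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			(void) printf("dataset %llu object %llu\n",
 *			    (u_longlong_t)fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_DATASET),
 *			    (u_longlong_t)fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_OBJECT));
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */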
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp;

			tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char *buf;
	int buflen = 128 * 1024;
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	buf = malloc(buflen);
	if (buf == NULL)
		return (ENOMEM);
	do {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = malloc(buflen);
			if (buf == NULL)
				return (ENOMEM);
		}
		/* CONSTCOND */
	} while (1);

	free(buf);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
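/*
 * Editor's illustrative sketch, not part of libzfs: dumping the command
 * strings stored in the history records (error checks omitted):
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t i, numrecords;
 *	char *cmd;
 *
 *	verify(zpool_get_history(zhp, &nvhis) == 0);
 *	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *	    &records, &numrecords) == 0);
 *	for (i = 0; i < numrecords; i++) {
 *		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *		    &cmd) == 0)
 *			(void) printf("%s\n", cmd);
 *	}
 *	nvlist_free(nvhis);
 */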
/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function.  When 'nvp' is set to NULL it indicates
 * no new events are available.  In either case the function returns 0 and
 * it is up to the caller to free 'nvp'.  In the case of a fatal error the
 * function will return a non-zero value.  When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
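/*
 * Editor's illustrative sketch, not part of libzfs: a zed-style event
 * loop.  Each consumer opens its own descriptor on ZFS_DEV ("/dev/zfs")
 * to track its position in the event stream:
 *
 *	int zevent_fd = open(ZFS_DEV, O_RDWR);
 *	nvlist_t *nv;
 *	int dropped;
 *
 *	while (zpool_events_next(hdl, &nv, &dropped, ZEVENT_NONBLOCK,
 *	    zevent_fd) == 0 && nv != NULL) {
 *		if (dropped > 0)
 *			(void) fprintf(stderr, "missed %d events\n",
 *			    dropped);
 *		nvlist_free(nv);
 *	}
 *	(void) close(zevent_fd);
 */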
/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}
/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle.  On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller. If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}
/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}
static int
zpool_label_disk_check(char *path)
{
	struct dk_gpt *vtoc;
	int fd, err;

	if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
		return (errno);

	if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
		(void) close(fd);
		return (err);
	}

	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);
		return (EIDRM);
	}

	efi_free(vtoc);
	(void) close(fd);
	return (0);
}
/*
 * Generate a unique partition name for the ZFS member.  Partitions must
 * have unique names to ensure udev will be able to create symlinks under
 * /dev/disk/by-partlabel/ for all pool members.  The partition names are
 * of the form zfs-<unique-id>.
 */
static void
zpool_label_name(char *label_name, int label_size)
{
	uint64_t id = 0;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, &id, sizeof (id)) != sizeof (id))
			id = 0;

		(void) close(fd);
	}

	if (id == 0)
		id = (((uint64_t)rand()) << 32) | (uint64_t)rand();

	snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int rval, fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);

	if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "label '%s': unable to read disk capacity"), path);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;
	slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	rval = efi_write(fd, vtoc);

	/* Flush the buffers to disk and invalidate the page cache. */
	(void) fsync(fd);
	(void) ioctl(fd, BLKFLSBUF);

	if (rval == 0)
		rval = efi_rescan(fd);

	/*
	 * Some block drivers (like pcata) may not support EFI GPT labels.
	 * Print out a helpful error message directing the user to manually
	 * label the disk and give a specific slice.
	 */
	if (rval != 0) {
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
		    "parted(8) and then provide a specific slice: %d"), rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);

	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	(void) zfs_append_partition(path, MAXPATHLEN);

	/* Wait for udev to signal that the device has settled. */
	rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
		    "detect device partitions on '%s': %d"), path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	/* We can't be too paranoid.  Read the label back and verify it. */
	(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
	rval = zpool_label_disk_check(path);
	if (rval) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
		    "EFI label on '%s' is damaged.  Ensure\nthis device "
		    "is not in use, and is functioning properly: %d"),
		    path, rval);
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	return (0);
}
/*
 * Allocate and return the underlying device name for a device mapper device.
 * If a device mapper device maps to multiple devices, return the first device.
 *
 * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
 *
 * Returns device name, or NULL on error or no match.  If dm_name is not a DM
 * device then return NULL.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
dm_get_underlying_path(char *dm_name)
{
	DIR *dp = NULL;
	struct dirent *ep;
	char *realp;
	char *tmp = NULL;
	char *path = NULL;
	char *dev_str;
	int size;

	if (dm_name == NULL)
		return (NULL);

	/* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
	realp = realpath(dm_name, NULL);
	if (realp == NULL)
		return (NULL);

	/*
	 * If they preface 'dev' with a path (like "/dev") then strip it off.
	 * We just want the 'dm-N' part.
	 */
	tmp = strrchr(realp, '/');
	if (tmp != NULL)
		dev_str = tmp + 1;	/* +1 since we want the chr after '/' */
	else
		dev_str = realp;

	size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
	if (size == -1 || !tmp)
		goto end;

	dp = opendir(tmp);
	if (dp == NULL)
		goto end;

	/* Return first sd* entry in /sys/block/dm-N/slaves/ */
	while ((ep = readdir(dp))) {
		if (ep->d_type != DT_DIR) {	/* skip "." and ".." dirs */
			size = asprintf(&path, "/dev/%s", ep->d_name);
			break;
		}
	}

end:
	if (dp != NULL)
		closedir(dp);
	free(tmp);
	free(realp);
	return (path);
}
/*
 * Return 1 if device is a device mapper or multipath device.
 * Return 0 if not.
 */
int
zfs_dev_is_dm(char *dev_name)
{
	char *tmp;

	tmp = dm_get_underlying_path(dev_name);
	if (tmp == NULL)
		return (0);

	free(tmp);
	return (1);
}
/*
 * By "whole disk" we mean an entire physical disk (something we can
 * label, toggle the write cache on, etc.) as opposed to the full
 * capacity of a pseudo-device such as lofi or did.  We act as if we
 * are labeling the disk, which should be a pretty good test of whether
 * it's a viable device or not.  Returns B_TRUE if it is and B_FALSE if
 * it isn't.
 */
boolean_t
zfs_dev_is_whole_disk(char *dev_name)
{
	struct dk_gpt *label;
	int fd;

	if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
		return (B_FALSE);

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
		(void) close(fd);
		return (B_FALSE);
	}

	efi_free(label);
	(void) close(fd);

	return (B_TRUE);
}
/*
 * Lookup the underlying device for a device name
 *
 * Often you'll have a symlink to a device, a partition device,
 * or a multipath device, and want to look up the underlying device.
 * This function returns the underlying device name.  If the device
 * name is already the underlying device, then just return the same
 * name.  If the device is a DM device with multiple underlying devices
 * then return the first one.
 *
 * For example:
 *
 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
 * dev_name:	/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
 * returns:	/dev/sda
 *
 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
 * dev_name:	/dev/mapper/mpatha
 * returns:	/dev/sda (first device)
 *
 * 3. /dev/sda (already the underlying device)
 * dev_name:	/dev/sda
 * returns:	/dev/sda
 *
 * 4. /dev/dm-3 (mapped to /dev/sda)
 * dev_name:	/dev/dm-3
 * returns:	/dev/sda
 *
 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
 * dev_name:	/dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
 * returns:	/dev/sdb
 *
 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
 * dev_name:	/dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
 * returns:	/dev/sda
 *
 * Returns underlying device name, or NULL on error or no match.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
zfs_get_underlying_path(char *dev_name)
{
	char *name = NULL;
	char *tmp;

	if (dev_name == NULL)
		return (NULL);

	tmp = dm_get_underlying_path(dev_name);

	/* dev_name not a DM device, so just un-symlinkize it */
	if (tmp == NULL)
		tmp = realpath(dev_name, NULL);

	if (tmp != NULL) {
		name = zfs_strip_partition_path(tmp);
		free(tmp);
	}

	return (name);
}
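/*
 * Editor's illustrative sketch, not part of libzfs, matching example 1
 * in the comment above:
 *
 *	char *real = zfs_get_underlying_path(
 *	    "/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001");
 *
 *	if (real != NULL) {
 *		(void) printf("%s\n", real);
 *		free(real);
 *	}
 *
 * On the example system this prints "/dev/sda".
 */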
/*
 * Given a dev name like "sda", return the full enclosure sysfs path to
 * the disk.  You can also pass in the name with "/dev" prepended
 * to it (like /dev/sda).
 *
 * For example, disk "sda" in enclosure slot 1:
 * dev:		"sda"
 * returns:	"/sys/class/enclosure/1:0:3:0/Slot 1"
 *
 * 'dev' must be a non-devicemapper device.
 *
 * Returned string must be freed.
 */
char *
zfs_get_enclosure_sysfs_path(char *dev_name)
{
	DIR *dp = NULL;
	struct dirent *ep;
	char buf[MAXPATHLEN];
	char *tmp1 = NULL;
	char *tmp2 = NULL;
	char *tmp3 = NULL;
	char *path = NULL;
	size_t size;
	int tmpsize;

	if (dev_name == NULL)
		return (NULL);

	/* If they preface 'dev' with a path (like "/dev") then strip it off */
	tmp1 = strrchr(dev_name, '/');
	if (tmp1 != NULL)
		dev_name = tmp1 + 1;	/* +1 since we want the chr after '/' */

	tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
	if (tmpsize == -1 || tmp1 == NULL) {
		tmp1 = NULL;
		goto end;
	}

	dp = opendir(tmp1);
	if (dp == NULL) {
		free(tmp1);
		tmp1 = NULL;	/* To make free() at the end a NOP */
		goto end;
	}

	/*
	 * Look through all sysfs entries in /sys/block/<dev>/device for
	 * the enclosure symlink.
	 */
	while ((ep = readdir(dp))) {
		/* Ignore everything that's not our enclosure_device link */
		if (strstr(ep->d_name, "enclosure_device") == NULL)
			continue;

		if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
		    tmp2 == NULL)
			break;

		size = readlink(tmp2, buf, sizeof (buf));

		/* Did readlink fail or crop the link name? */
		if (size == -1 || size >= sizeof (buf)) {
			free(tmp2);
			tmp2 = NULL;	/* To make free() at the end a NOP */
			break;
		}

		/*
		 * We got a valid link.  readlink() doesn't terminate strings
		 * so we have to do it.
		 */
		buf[size] = '\0';

		/*
		 * Our link will look like:
		 *
		 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
		 *
		 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
		 */
		tmp3 = strstr(buf, "enclosure");
		if (tmp3 == NULL)
			break;

		if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
			/* If asprintf() fails, 'path' is undefined */