/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *);

#define	BACKUP_SLICE	"s2"
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
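
/*
 * Illustrative usage sketch, not part of libzfs: read numeric pool
 * properties through the accessor above. The ZPOOL_EXAMPLES guard is
 * hypothetical (keeps this out of a real build), and the pool name
 * "tank" is assumed.
 */
#ifdef ZPOOL_EXAMPLES
static void
example_numeric_props(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	uint64_t guid, version;
	zprop_source_t src;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;

	/* The GUID is retrievable even from a faulted pool (see above). */
	guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, &src);
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	(void) printf("guid=%llu version=%llu source=%d\n",
	    (u_longlong_t)guid, (u_longlong_t)version, src);

	zpool_close(zhp);
}
#endif	/* ZPOOL_EXAMPLES */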
/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}
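
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): print the cached state of an open pool handle using the two
 * mapping functions above.
 */
#ifdef ZPOOL_EXAMPLES
static void
example_state_names(zpool_handle_t *zhp)
{
	/* zpool_get_state() returns the handle's cached pool_state_t. */
	(void) printf("%s: %s\n", zpool_get_name(zhp),
	    zpool_pool_state_to_name(zpool_get_state(zhp)));
}
#endif	/* ZPOOL_EXAMPLES */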
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_BOOTSIZE:
		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
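
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): the 'literal' flag above selects between human-readable and
 * raw numeric formatting for the same property.
 */
#ifdef ZPOOL_EXAMPLES
static void
example_formatted_props(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];

	/* Human-readable, e.g. "15.5G". */
	if (zpool_get_prop(zhp, ZPOOL_PROP_SIZE, buf, sizeof (buf),
	    NULL, B_FALSE) == 0)
		(void) printf("size=%s\n", buf);
	/* Literal, e.g. "16642998272". */
	if (zpool_get_prop(zhp, ZPOOL_PROP_SIZE, buf, sizeof (buf),
	    NULL, B_TRUE) == 0)
		(void) printf("size(literal)=%s\n", buf);
	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
	    NULL, B_FALSE) == 0)
		(void) printf("health=%s\n", buf);
}
#endif	/* ZPOOL_EXAMPLES */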
/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on. Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
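
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): which bootfs names the check above accepts and rejects.
 */
#ifdef ZPOOL_EXAMPLES
static void
example_bootfs_names(void)
{
	char fs1[] = "rpool/ROOT/solaris";
	char fs2[] = "rpool2/ROOT";

	/* Same pool prefix followed by '/' (or end of string): valid. */
	assert(bootfs_name_valid("rpool", fs1));
	/* "rpool2" is a different pool, so this is rejected. */
	assert(!bootfs_name_valid("rpool", fs2));
}
#endif	/* ZPOOL_EXAMPLES */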
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTSIZE:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to reside in the same pool it
			 * is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
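
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): build a property nvlist the way a consumer might before pool
 * creation and run it through the validator above. The pool name "tank"
 * is assumed; index/boolean values arrive as strings and are parsed by
 * zprop_parse_value() inside zpool_valid_proplist().
 */
#ifdef ZPOOL_EXAMPLES
static nvlist_t *
example_validate_props(libzfs_handle_t *hdl)
{
	nvlist_t *props, *valid;
	prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
	char errbuf[1024];

	(void) snprintf(errbuf, sizeof (errbuf), "cannot create 'tank'");

	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(props, "autoexpand", "on") == 0);
	verify(nvlist_add_string(props, "feature@lz4_compress",
	    "enabled") == 0);

	/* Returns a parsed copy on success, NULL (with error set) on failure. */
	valid = zpool_valid_proplist(hdl, "tank", props, SPA_VERSION_1,
	    flags, errbuf);
	nvlist_free(props);
	return (valid);
}
#endif	/* ZPOOL_EXAMPLES */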
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
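
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): set one pool property. Validation, the ioctl, and the
 * property-cache refresh are all handled inside zpool_set_prop().
 */
#ifdef ZPOOL_EXAMPLES
static int
example_set_comment(zpool_handle_t *zhp)
{
	return (zpool_set_prop(zhp, "comment", "scratch pool"));
}
#endif	/* ZPOOL_EXAMPLES */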
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0)
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			else
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf' if one is supplied.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
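
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): the 'isopen' flag controls whether the expanded reserved-name
 * check above applies.
 */
#ifdef ZPOOL_EXAMPLES
static void
example_name_checks(libzfs_handle_t *hdl)
{
	/* Plain names pass; reserved prefixes fail at create/import time. */
	(void) printf("tank:    %d\n", zpool_name_valid(hdl, B_FALSE, "tank"));
	(void) printf("mirror0: %d\n",
	    zpool_name_valid(hdl, B_FALSE, "mirror0"));
	/* The expanded reserved-name check is skipped for existing pools. */
	(void) printf("open:    %d\n",
	    zpool_name_valid(hdl, B_TRUE, "mirror0"));
}
#endif	/* ZPOOL_EXAMPLES */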
/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
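
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): hand-build a minimal single-disk vdev tree and create a pool
 * from it. In the real zpool(1M) command make_root_vdev() constructs
 * this tree; the pool name "tank" and device path are assumptions.
 */
#ifdef ZPOOL_EXAMPLES
static int
example_create_pool(libzfs_handle_t *hdl)
{
	nvlist_t *nvroot, *disk;
	nvlist_t *children[1];
	int ret;

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t0d0s0") == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	children[0] = disk;
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    children, 1) == 0);

	/* No pool or root-dataset properties in this minimal case. */
	ret = zpool_create(hdl, "tank", nvroot, NULL, NULL);
	nvlist_free(disk);
	nvlist_free(nvroot);
	return (ret);
}
#endif	/* ZPOOL_EXAMPLES */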
/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
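
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): a plain export. The log string is recorded in pool history;
 * zpool_export_force() additionally sets the hard-force flag.
 */
#ifdef ZPOOL_EXAMPLES
static int
example_export(zpool_handle_t *zhp)
{
	return (zpool_export(zhp, B_FALSE, "example export"));
}
#endif	/* ZPOOL_EXAMPLES */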
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "), loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str)
{
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}
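
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): paths that ctd_check_path() accepts and rejects.
 */
#ifdef ZPOOL_EXAMPLES
static void
example_ctd_checks(void)
{
	char a[] = "/dev/dsk/c0t0d0s0";
	char b[] = "/dev/dsk/c1t2d3s0/old";
	char c[] = "/var/tmp/file0";

	(void) printf("%d %d %d\n",
	    ctd_check_path(a),	/* 1: last component is c#t#d#... */
	    ctd_check_path(b),	/* 1: "/old" suffix is skipped over */
	    ctd_check_path(c));	/* 0: not a c#t#d# name */
}
#endif	/* ZPOOL_EXAMPLES */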
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. To support
		 *   UEFI boot, these end in "s0" or "s0/old" or "s1" or
		 *   "s1/old". The "s0" or "s1" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if ((strcmp(&val[vlen - 2], "s0") == 0 ||
				    strcmp(&val[vlen - 2], "s1") == 0) &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if ((strcmp(&val[vlen - 6], "s0/old") == 0 ||
				    strcmp(&val[vlen - 6], "s1/old") == 0) &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT,
		    path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
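
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): look up a leaf vdev by its short device name. Short names are
 * resolved under ZFS_DISK_ROOT, a decimal string is treated as a guid,
 * and "mirror-N"/"raidz-N" as interior vdevs.
 */
#ifdef ZPOOL_EXAMPLES
static void
example_find_vdev(zpool_handle_t *zhp)
{
	boolean_t spare, l2cache, log;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev(zhp, "c0t0d0", &spare, &l2cache,
	    &log)) != NULL)
		(void) printf("found (spare=%d l2cache=%d log=%d)\n",
		    spare, l2cache, log);
}
#endif	/* ZPOOL_EXAMPLES */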
static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can only have a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}
/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}
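
/*
 * Illustrative sketch, not part of libzfs (hypothetical ZPOOL_EXAMPLES
 * guard): fetch the boot device physical path(s). Only meaningful for
 * root pools, which have a single top-level vdev (see above).
 */
#ifdef ZPOOL_EXAMPLES
static void
example_physpath(zpool_handle_t *zhp)
{
	char physpath[MAXPATHLEN];

	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
		(void) printf("boot path(s): %s\n", physpath);
}
#endif	/* ZPOOL_EXAMPLES */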
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
{
	char path[MAXPATHLEN];
	char errbuf[1024];
	int fd, error;
	int (*_efi_use_whole_disk)(int);

	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)
		return (-1);

	(void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device"), name);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 */
	error = _efi_use_whole_disk(fd);
	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), name);
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}
	return (0);
}
/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			pathname += strlen(ZFS_DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
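
/*
 * Illustrative usage sketch (not part of the library): replacing old_disk
 * with new_disk.  The nvroot argument describes the new device as a
 * single-child root vdev, the same shape 'zpool replace' builds; names
 * are hypothetical and nvlist error checking is omitted for brevity.
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	(void) nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c1t3d0s0");
 *	(void) nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1);
 *
 *	(void) zpool_vdev_attach(zhp, "c1t2d0", "c1t3d0", nvroot, B_TRUE);
 *
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 */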

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], B_FALSE);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
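
/*
 * Illustrative usage sketch (not part of the library): a dry-run split
 * that only builds and returns the would-be vdev tree without touching
 * the pool.  Pool name is hypothetical.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newtank", &newroot, NULL, flags) == 0) {
 *		nvlist_print(stdout, newroot);
 *		nvlist_free(newroot);
 *	}
 */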

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
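
/*
 * Illustrative usage sketch (not part of the library): removing a cache
 * device.  Only inactive hot spares, cache, and log devices are eligible
 * here; the device name is hypothetical.
 *
 *	if (zpool_vdev_remove(zhp, "c1t4d0") != 0)
 *		(void) fprintf(stderr, "cannot remove c1t4d0\n");
 */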

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
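
/*
 * Illustrative usage sketch (not part of the library): clearing errors on
 * the whole pool with an explicit no-rewind policy, the same shape of
 * nvlist that 'zpool clear' passes down.  Error checking is abbreviated.
 *
 *	nvlist_t *policy = NULL;
 *
 *	(void) nvlist_alloc(&policy, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
 *	    ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	nvlist_free(policy);
 */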

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0)
			path += strlen(ZFS_DISK_ROOTD);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			int pathlen = strlen(path);
			char *tmp = zfs_strdup(hdl, path);

			/*
			 * If it starts with c#, and ends with "s0" or "s1",
			 * chop the slice off, or if it ends with "s0/old" or
			 * "s1/old", remove the slice from the middle.
			 */
			if (CTD_CHECK(tmp)) {
				if (strcmp(&tmp[pathlen - 2], "s0") == 0 ||
				    strcmp(&tmp[pathlen - 2], "s1") == 0) {
					tmp[pathlen - 2] = '\0';
				} else if (pathlen > 6 &&
				    (strcmp(&tmp[pathlen - 6], "s0/old") == 0 ||
				    strcmp(&tmp[pathlen - 6], "s1/old") == 0)) {
					(void) strcpy(&tmp[pathlen - 6],
					    "/old");
				}
			}
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}

static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				if (dst == NULL)
					return (-1);
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
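
/*
 * Illustrative usage sketch (not part of the library): walking the
 * uniquified error list and resolving each <dataset, object> pair to a
 * path with zpool_obj_to_path() (defined later in this file).
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *			char pathbuf[MAXPATHLEN * 2];
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, pathbuf,
 *			    sizeof (pathbuf));
 *			(void) printf("%s\n", pathbuf);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */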

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	(void) strlcpy(string, basename(argv[0]), len);
	for (int i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
	zcmd_free_nvlists(&zc);
	fnvlist_free(args);
	return (err);
}
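
/*
 * Illustrative usage sketch (not part of the library): recording the
 * invoking command line in the pool history, the way the zpool/zfs
 * commands do at startup.
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
 *	(void) zpool_log_history(hdl, history_str);
 */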

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = reallocarray(*records, *numrecords * 2,
			    sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char *buf;
	int buflen = 128 * 1024;
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	buf = malloc(buflen);
	if (buf == NULL)
		return (ENOMEM);
	do {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = malloc(buflen);
			if (buf == NULL)
				return (ENOMEM);
		}
		/* CONSTCOND */
	} while (1);

	free(buf);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
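
/*
 * Illustrative usage sketch (not part of the library): dumping the time
 * and command of each history record, similar to 'zpool history'.  Error
 * handling is abbreviated.
 *
 *	nvlist_t *nvhis = NULL;
 *	nvlist_t **records;
 *	uint_t numrecords, i;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			uint64_t tsec;
 *			char *cmd;
 *
 *			if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
 *			    &tsec) == 0 &&
 *			    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%llu %s\n",
 *				    (u_longlong_t)tsec, cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */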

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    dsobj, obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
	}
	free(mntpnt);
}

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller. If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition. If the caller has passed a non-NULL boolean argument, then
 * we set it to indicate if the disk does have an EFI system partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb, boolean_t *system)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	boolean_t boot = B_FALSE;
	int err = -1;
	int slice;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			for (slice = 0; slice < vtoc->efi_nparts; slice++) {
				if (vtoc->efi_parts[slice].p_tag == V_SYSTEM)
					boot = B_TRUE;
				if (vtoc->efi_parts[slice].p_tag == V_USR)
					break;
			}
			if (sb != NULL && vtoc->efi_parts[slice].p_tag == V_USR)
				*sb = vtoc->efi_parts[slice].p_start;
			if (system != NULL)
				*system = boot;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb, NULL) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name,
    zpool_boot_label_t boot_type, uint64_t boot_size, int *slice)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"), name);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	/* first fix the partition start block */
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;

	/*
	 * EFI System partition is using slice 0.
	 * ZFS is on slice 1 and slice 8 is reserved.
	 * We assume the GPT partition table without system
	 * partition has zfs p_start == NEW_START_BLOCK.
	 * If start_block != NEW_START_BLOCK, it means we have
	 * a system partition.  The correct solution would be to query/cache
	 * the vtoc from an existing vdev member.
	 */
	if (boot_type == ZPOOL_CREATE_BOOT_LABEL) {
		if (boot_size % vtoc->efi_lbasize != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "boot partition size must be a multiple of %d"),
			    vtoc->efi_lbasize);
			(void) close(fd);
			efi_free(vtoc);
			return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
		}
		/*
		 * System partition size checks.
		 * Note that 1MB is quite an arbitrary value; since we
		 * are creating a dedicated pool, it should be enough
		 * to hold fat + efi bootloader.  May need to be
		 * adjusted if the bootloader size grows.
		 */
		if (boot_size < 1024 * 1024) {
			char buf[64];

			zfs_nicenum(boot_size, buf, sizeof (buf));
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Specified size %s for EFI System partition is too "
			    "small, the minimum size is 1MB."), buf);
			(void) close(fd);
			efi_free(vtoc);
			return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
		}
		/* 33MB is tested with mkfs -F pcfs */
		if (hdl->libzfs_printerr &&
		    ((vtoc->efi_lbasize == 512 &&
		    boot_size < 33 * 1024 * 1024) ||
		    (vtoc->efi_lbasize == 4096 &&
		    boot_size < 256 * 1024 * 1024))) {
			char buf[64];

			zfs_nicenum(boot_size, buf, sizeof (buf));
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
			    "Warning: EFI System partition size %s does "
			    "not allow creating a FAT32 file\nsystem, which "
			    "may result in an unbootable system.\n"), buf);
		}
		/* Adjust zfs partition start by size of system partition. */
		start_block += boot_size / vtoc->efi_lbasize;
	}

	if (start_block == NEW_START_BLOCK) {
		/*
		 * Use default layout.
		 * ZFS is on slice 0 and slice 8 is reserved.
		 */
		slice_size = vtoc->efi_last_u_lba + 1;
		slice_size -= EFI_MIN_RESV_SIZE;
		slice_size -= start_block;
		if (slice != NULL)
			*slice = 0;

		vtoc->efi_parts[0].p_start = start_block;
		vtoc->efi_parts[0].p_size = slice_size;

		vtoc->efi_parts[0].p_tag = V_USR;
		(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

		vtoc->efi_parts[8].p_start = slice_size + start_block;
		vtoc->efi_parts[8].p_size = resv;
		vtoc->efi_parts[8].p_tag = V_RESERVED;
	} else {
		slice_size = start_block - NEW_START_BLOCK;
		vtoc->efi_parts[0].p_start = NEW_START_BLOCK;
		vtoc->efi_parts[0].p_size = slice_size;
		vtoc->efi_parts[0].p_tag = V_SYSTEM;
		(void) strcpy(vtoc->efi_parts[0].p_name, "loader");

		/* prepare slice 1 */
		slice_size = vtoc->efi_last_u_lba + 1 - slice_size;
		slice_size -= resv;
		slice_size -= NEW_START_BLOCK;
		if (slice != NULL)
			*slice = 1;

		vtoc->efi_parts[1].p_start = start_block;
		vtoc->efi_parts[1].p_size = slice_size;
		vtoc->efi_parts[1].p_tag = V_USR;
		(void) strcpy(vtoc->efi_parts[1].p_name, "zfs");

		vtoc->efi_parts[8].p_start = slice_size + start_block;
		vtoc->efi_parts[8].p_size = resv;
		vtoc->efi_parts[8].p_tag = V_RESERVED;
	}

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message dir-
		 * ecting the user to manually label the disk and give
		 * a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
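
/*
 * Illustrative usage sketch (not part of the library): writing a plain
 * (non-boot) EFI label on a whole disk before adding it to a pool, which
 * is what whole-disk 'zpool create'/'zpool add' do.  The disk name is
 * hypothetical, and ZPOOL_NO_BOOT_LABEL is assumed here as the no-boot
 * value of the zpool_boot_label_t argument.
 *
 *	if (zpool_label_disk(hdl, zhp, "c1t5d0", ZPOOL_NO_BOOT_LABEL,
 *	    0, NULL) != 0)
 *		(void) fprintf(stderr, "labeling failed\n");
 */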

static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
 *
 * Allowable storage configurations include mirrors, all raidz variants, and
 * pools with log, cache, and spare devices.  Pools which are backed by files or
 * have missing/hole vdevs are not suitable.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZFS_MAX_DATASET_NAME_LEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		return (1);
	} else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		return (1);
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {