Force fault a vdev with 'zpool offline -f'
zfs.git: lib/libzfs/libzfs_pool.c (blob 28ccf8f4d62e28443952774315f1183c7093074a)
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
29 #include <ctype.h>
30 #include <errno.h>
31 #include <devid.h>
32 #include <fcntl.h>
33 #include <libintl.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <unistd.h>
38 #include <libgen.h>
39 #include <zone.h>
40 #include <sys/stat.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <dlfcn.h>
46 #include "zfs_namecheck.h"
47 #include "zfs_prop.h"
48 #include "libzfs_impl.h"
49 #include "zfs_comutil.h"
50 #include "zfeature_common.h"
52 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
54 typedef struct prop_flags {
55 int create:1; /* Validate property on creation */
56 int import:1; /* Validate property on import */
57 } prop_flags_t;
60 * ====================================================================
61 * zpool property functions
62 * ====================================================================
65 static int
66 zpool_get_all_props(zpool_handle_t *zhp)
68 zfs_cmd_t zc = {"\0"};
69 libzfs_handle_t *hdl = zhp->zpool_hdl;
71 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
73 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
74 return (-1);
76 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
77 if (errno == ENOMEM) {
78 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
79 zcmd_free_nvlists(&zc);
80 return (-1);
82 } else {
83 zcmd_free_nvlists(&zc);
84 return (-1);
88 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
89 zcmd_free_nvlists(&zc);
90 return (-1);
93 zcmd_free_nvlists(&zc);
95 return (0);
98 static int
99 zpool_props_refresh(zpool_handle_t *zhp)
101 nvlist_t *old_props;
103 old_props = zhp->zpool_props;
105 if (zpool_get_all_props(zhp) != 0)
106 return (-1);
108 nvlist_free(old_props);
109 return (0);
112 static char *
113 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
114 zprop_source_t *src)
116 nvlist_t *nv, *nvl;
117 uint64_t ival;
118 char *value;
119 zprop_source_t source;
121 nvl = zhp->zpool_props;
122 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
123 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
124 source = ival;
125 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
126 } else {
127 source = ZPROP_SRC_DEFAULT;
128 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
129 value = "-";
132 if (src)
133 *src = source;
135 return (value);
138 uint64_t
139 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
141 nvlist_t *nv, *nvl;
142 uint64_t value;
143 zprop_source_t source;
145 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
147 * zpool_get_all_props() has most likely failed because
148 * the pool is faulted, but if all we need is the top level
149 * vdev's guid then get it from the zhp config nvlist.
151 if ((prop == ZPOOL_PROP_GUID) &&
152 (nvlist_lookup_nvlist(zhp->zpool_config,
153 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
154 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
155 == 0)) {
156 return (value);
158 return (zpool_prop_default_numeric(prop));
161 nvl = zhp->zpool_props;
162 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
163 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
164 source = value;
165 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
166 } else {
167 source = ZPROP_SRC_DEFAULT;
168 value = zpool_prop_default_numeric(prop);
171 if (src)
172 *src = source;
174 return (value);
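/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * reading a numeric property such as the pool guid through
 * zpool_get_prop_int(), the same value the fallback path above recovers
 * from the config nvlist when the property list is unavailable.
 */
static uint64_t
example_pool_guid(zpool_handle_t *zhp)
{
	return (zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL));
}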
178 * Map VDEV STATE to printed strings.
180 char *
181 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
183 switch (state) {
184 case VDEV_STATE_CLOSED:
185 case VDEV_STATE_OFFLINE:
186 return (gettext("OFFLINE"));
187 case VDEV_STATE_REMOVED:
188 return (gettext("REMOVED"));
189 case VDEV_STATE_CANT_OPEN:
190 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
191 return (gettext("FAULTED"));
192 else if (aux == VDEV_AUX_SPLIT_POOL)
193 return (gettext("SPLIT"));
194 else
195 return (gettext("UNAVAIL"));
196 case VDEV_STATE_FAULTED:
197 return (gettext("FAULTED"));
198 case VDEV_STATE_DEGRADED:
199 return (gettext("DEGRADED"));
200 case VDEV_STATE_HEALTHY:
201 return (gettext("ONLINE"));
203 default:
204 break;
207 return (gettext("UNKNOWN"));
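/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * rendering a vdev's state with zpool_state_to_name().  The vdev_stat_t is
 * assumed to have been fetched from the config nvlist via
 * ZPOOL_CONFIG_VDEV_STATS, as zpool_get_prop() does below for
 * ZPOOL_PROP_HEALTH.
 */
static void
example_print_vdev_state(vdev_stat_t *vs)
{
	(void) printf("vdev state: %s\n",
	    zpool_state_to_name(vs->vs_state, vs->vs_aux));
}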
211 * Map POOL STATE to printed strings.
213 const char *
214 zpool_pool_state_to_name(pool_state_t state)
216 switch (state) {
217 default:
218 break;
219 case POOL_STATE_ACTIVE:
220 return (gettext("ACTIVE"));
221 case POOL_STATE_EXPORTED:
222 return (gettext("EXPORTED"));
223 case POOL_STATE_DESTROYED:
224 return (gettext("DESTROYED"));
225 case POOL_STATE_SPARE:
226 return (gettext("SPARE"));
227 case POOL_STATE_L2CACHE:
228 return (gettext("L2CACHE"));
229 case POOL_STATE_UNINITIALIZED:
230 return (gettext("UNINITIALIZED"));
231 case POOL_STATE_UNAVAIL:
232 return (gettext("UNAVAIL"));
233 case POOL_STATE_POTENTIALLY_ACTIVE:
234 return (gettext("POTENTIALLY_ACTIVE"));
237 return (gettext("UNKNOWN"));
241 * Get a zpool property value for 'prop' and return the value in
242 * a pre-allocated buffer.
245 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
246 size_t len, zprop_source_t *srctype, boolean_t literal)
248 uint64_t intval;
249 const char *strval;
250 zprop_source_t src = ZPROP_SRC_NONE;
251 nvlist_t *nvroot;
252 vdev_stat_t *vs;
253 uint_t vsc;
255 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
256 switch (prop) {
257 case ZPOOL_PROP_NAME:
258 (void) strlcpy(buf, zpool_get_name(zhp), len);
259 break;
261 case ZPOOL_PROP_HEALTH:
262 (void) strlcpy(buf, "FAULTED", len);
263 break;
265 case ZPOOL_PROP_GUID:
266 intval = zpool_get_prop_int(zhp, prop, &src);
267 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
268 break;
270 case ZPOOL_PROP_ALTROOT:
271 case ZPOOL_PROP_CACHEFILE:
272 case ZPOOL_PROP_COMMENT:
273 if (zhp->zpool_props != NULL ||
274 zpool_get_all_props(zhp) == 0) {
275 (void) strlcpy(buf,
276 zpool_get_prop_string(zhp, prop, &src),
277 len);
278 break;
280 /* FALLTHROUGH */
281 default:
282 (void) strlcpy(buf, "-", len);
283 break;
286 if (srctype != NULL)
287 *srctype = src;
288 return (0);
291 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
292 prop != ZPOOL_PROP_NAME)
293 return (-1);
295 switch (zpool_prop_get_type(prop)) {
296 case PROP_TYPE_STRING:
297 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
298 len);
299 break;
301 case PROP_TYPE_NUMBER:
302 intval = zpool_get_prop_int(zhp, prop, &src);
304 switch (prop) {
305 case ZPOOL_PROP_SIZE:
306 case ZPOOL_PROP_ALLOCATED:
307 case ZPOOL_PROP_FREE:
308 case ZPOOL_PROP_FREEING:
309 case ZPOOL_PROP_LEAKED:
310 case ZPOOL_PROP_ASHIFT:
311 if (literal)
312 (void) snprintf(buf, len, "%llu",
313 (u_longlong_t)intval);
314 else
315 (void) zfs_nicenum(intval, buf, len);
316 break;
318 case ZPOOL_PROP_EXPANDSZ:
319 if (intval == 0) {
320 (void) strlcpy(buf, "-", len);
321 } else if (literal) {
322 (void) snprintf(buf, len, "%llu",
323 (u_longlong_t)intval);
324 } else {
325 (void) zfs_nicebytes(intval, buf, len);
327 break;
329 case ZPOOL_PROP_CAPACITY:
330 if (literal) {
331 (void) snprintf(buf, len, "%llu",
332 (u_longlong_t)intval);
333 } else {
334 (void) snprintf(buf, len, "%llu%%",
335 (u_longlong_t)intval);
337 break;
339 case ZPOOL_PROP_FRAGMENTATION:
340 if (intval == UINT64_MAX) {
341 (void) strlcpy(buf, "-", len);
342 } else if (literal) {
343 (void) snprintf(buf, len, "%llu",
344 (u_longlong_t)intval);
345 } else {
346 (void) snprintf(buf, len, "%llu%%",
347 (u_longlong_t)intval);
349 break;
351 case ZPOOL_PROP_DEDUPRATIO:
352 if (literal)
353 (void) snprintf(buf, len, "%llu.%02llu",
354 (u_longlong_t)(intval / 100),
355 (u_longlong_t)(intval % 100));
356 else
357 (void) snprintf(buf, len, "%llu.%02llux",
358 (u_longlong_t)(intval / 100),
359 (u_longlong_t)(intval % 100));
360 break;
362 case ZPOOL_PROP_HEALTH:
363 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
364 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
365 verify(nvlist_lookup_uint64_array(nvroot,
366 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
367 == 0);
369 (void) strlcpy(buf, zpool_state_to_name(intval,
370 vs->vs_aux), len);
371 break;
372 case ZPOOL_PROP_VERSION:
373 if (intval >= SPA_VERSION_FEATURES) {
374 (void) snprintf(buf, len, "-");
375 break;
377 /* FALLTHROUGH */
378 default:
379 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
381 break;
383 case PROP_TYPE_INDEX:
384 intval = zpool_get_prop_int(zhp, prop, &src);
385 if (zpool_prop_index_to_string(prop, intval, &strval)
386 != 0)
387 return (-1);
388 (void) strlcpy(buf, strval, len);
389 break;
391 default:
392 abort();
395 if (srctype)
396 *srctype = src;
398 return (0);
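/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * fetching the pool health string with zpool_get_prop().  ZFS_MAXPROPLEN and
 * ZPOOL_PROP_HEALTH come from the public libzfs headers; error handling is
 * elided.
 */
static void
example_print_health(zpool_handle_t *zhp)
{
	char health[ZFS_MAXPROPLEN];

	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
	    sizeof (health), NULL, B_FALSE) == 0)
		(void) printf("%s: %s\n", zpool_get_name(zhp), health);
}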
402 * Check that the given bootfs name belongs to the pool it is being set on.
403 * Assuming bootfs is a valid dataset name.
405 static boolean_t
406 bootfs_name_valid(const char *pool, char *bootfs)
408 int len = strlen(pool);
410 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
411 return (B_FALSE);
413 if (strncmp(pool, bootfs, len) == 0 &&
414 (bootfs[len] == '/' || bootfs[len] == '\0'))
415 return (B_TRUE);
417 return (B_FALSE);
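/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * how bootfs_name_valid() behaves.  A dataset in the same pool
 * ("rpool/ROOT/default" for pool "rpool") is accepted; a dataset from a
 * different pool is rejected.
 */
static void
example_bootfs_checks(void)
{
	char ok[] = "rpool/ROOT/default";
	char bad[] = "tank/ROOT/default";

	(void) printf("%d %d\n",
	    bootfs_name_valid("rpool", ok),
	    bootfs_name_valid("rpool", bad));
}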
420 boolean_t
421 zpool_is_bootable(zpool_handle_t *zhp)
423 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
425 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
426 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
427 sizeof (bootfs)) != 0);
432 * Given an nvlist of zpool properties to be set, validate that they are
433 * correct, and parse any numeric properties (index, boolean, etc) if they are
434 * specified as strings.
436 static nvlist_t *
437 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
438 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
440 nvpair_t *elem;
441 nvlist_t *retprops;
442 zpool_prop_t prop;
443 char *strval;
444 uint64_t intval;
445 char *slash, *check;
446 struct stat64 statbuf;
447 zpool_handle_t *zhp;
449 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
450 (void) no_memory(hdl);
451 return (NULL);
454 elem = NULL;
455 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
456 const char *propname = nvpair_name(elem);
458 prop = zpool_name_to_prop(propname);
459 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
460 int err;
461 char *fname = strchr(propname, '@') + 1;
463 err = zfeature_lookup_name(fname, NULL);
464 if (err != 0) {
465 ASSERT3U(err, ==, ENOENT);
466 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
467 "invalid feature '%s'"), fname);
468 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
469 goto error;
472 if (nvpair_type(elem) != DATA_TYPE_STRING) {
473 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
474 "'%s' must be a string"), propname);
475 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
476 goto error;
479 (void) nvpair_value_string(elem, &strval);
480 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
481 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
482 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
483 "property '%s' can only be set to "
484 "'enabled' or 'disabled'"), propname);
485 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
486 goto error;
489 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
490 (void) no_memory(hdl);
491 goto error;
493 continue;
497 * Make sure this property is valid and applies to this type.
499 if (prop == ZPROP_INVAL) {
500 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 "invalid property '%s'"), propname);
502 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
503 goto error;
506 if (zpool_prop_readonly(prop)) {
507 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
508 "is readonly"), propname);
509 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
510 goto error;
513 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
514 &strval, &intval, errbuf) != 0)
515 goto error;
518 * Perform additional checking for specific properties.
520 switch (prop) {
521 case ZPOOL_PROP_VERSION:
522 if (intval < version ||
523 !SPA_VERSION_IS_SUPPORTED(intval)) {
524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525 "property '%s' number %d is invalid."),
526 propname, intval);
527 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
528 goto error;
530 break;
532 case ZPOOL_PROP_ASHIFT:
533 if (intval != 0 &&
534 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
535 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
536 "invalid '%s=%d' property: only values "
537 "between %" PRId32 " and %" PRId32 " "
538 "are allowed.\n"),
539 propname, intval, ASHIFT_MIN, ASHIFT_MAX);
540 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
541 goto error;
543 break;
545 case ZPOOL_PROP_BOOTFS:
546 if (flags.create || flags.import) {
547 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
548 "property '%s' cannot be set at creation "
549 "or import time"), propname);
550 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
551 goto error;
554 if (version < SPA_VERSION_BOOTFS) {
555 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
556 "pool must be upgraded to support "
557 "'%s' property"), propname);
558 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
559 goto error;
563 * The bootfs property value has to be a dataset name and
564 * the dataset has to be in the pool whose property is being set.
566 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
567 strval)) {
568 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
569 "is an invalid name"), strval);
570 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
571 goto error;
574 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 "could not open pool '%s'"), poolname);
577 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
578 goto error;
580 zpool_close(zhp);
581 break;
583 case ZPOOL_PROP_ALTROOT:
584 if (!flags.create && !flags.import) {
585 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
586 "property '%s' can only be set during pool "
587 "creation or import"), propname);
588 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
589 goto error;
592 if (strval[0] != '/') {
593 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
594 "bad alternate root '%s'"), strval);
595 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
596 goto error;
598 break;
600 case ZPOOL_PROP_CACHEFILE:
601 if (strval[0] == '\0')
602 break;
604 if (strcmp(strval, "none") == 0)
605 break;
607 if (strval[0] != '/') {
608 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
609 "property '%s' must be empty, an "
610 "absolute path, or 'none'"), propname);
611 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
612 goto error;
615 slash = strrchr(strval, '/');
617 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
618 strcmp(slash, "/..") == 0) {
619 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
620 "'%s' is not a valid file"), strval);
621 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
622 goto error;
625 *slash = '\0';
627 if (strval[0] != '\0' &&
628 (stat64(strval, &statbuf) != 0 ||
629 !S_ISDIR(statbuf.st_mode))) {
630 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
631 "'%s' is not a valid directory"),
632 strval);
633 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
634 goto error;
637 *slash = '/';
638 break;
640 case ZPOOL_PROP_COMMENT:
641 for (check = strval; *check != '\0'; check++) {
642 if (!isprint(*check)) {
643 zfs_error_aux(hdl,
644 dgettext(TEXT_DOMAIN,
645 "comment may only have printable "
646 "characters"));
647 (void) zfs_error(hdl, EZFS_BADPROP,
648 errbuf);
649 goto error;
652 if (strlen(strval) > ZPROP_MAX_COMMENT) {
653 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
654 "comment must not exceed %d characters"),
655 ZPROP_MAX_COMMENT);
656 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
657 goto error;
659 break;
660 case ZPOOL_PROP_READONLY:
661 if (!flags.import) {
662 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
663 "property '%s' can only be set at "
664 "import time"), propname);
665 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
666 goto error;
668 break;
669 case ZPOOL_PROP_TNAME:
670 if (!flags.create) {
671 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
672 "property '%s' can only be set at "
673 "creation time"), propname);
674 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
675 goto error;
677 break;
679 default:
680 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
681 "property '%s'(%d) not defined"), propname, prop);
682 break;
686 return (retprops);
687 error:
688 nvlist_free(retprops);
689 return (NULL);
693 * Set zpool property : propname=propval.
696 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
698 zfs_cmd_t zc = {"\0"};
699 int ret = -1;
700 char errbuf[1024];
701 nvlist_t *nvl = NULL;
702 nvlist_t *realprops;
703 uint64_t version;
704 prop_flags_t flags = { 0 };
706 (void) snprintf(errbuf, sizeof (errbuf),
707 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
708 zhp->zpool_name);
710 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
711 return (no_memory(zhp->zpool_hdl));
713 if (nvlist_add_string(nvl, propname, propval) != 0) {
714 nvlist_free(nvl);
715 return (no_memory(zhp->zpool_hdl));
718 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
719 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
720 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
721 nvlist_free(nvl);
722 return (-1);
725 nvlist_free(nvl);
726 nvl = realprops;
729 * Execute the corresponding ioctl() to set this property.
731 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
733 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
734 nvlist_free(nvl);
735 return (-1);
738 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
740 zcmd_free_nvlists(&zc);
741 nvlist_free(nvl);
743 if (ret)
744 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
745 else
746 (void) zpool_props_refresh(zhp);
748 return (ret);
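/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * setting a pool property through zpool_set_prop(), which funnels the value
 * through zpool_valid_proplist() above before issuing the ioctl.
 */
static int
example_set_comment(zpool_handle_t *zhp)
{
	return (zpool_set_prop(zhp, "comment", "scratch pool"));
}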
752 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
754 libzfs_handle_t *hdl = zhp->zpool_hdl;
755 zprop_list_t *entry;
756 char buf[ZFS_MAXPROPLEN];
757 nvlist_t *features = NULL;
758 nvpair_t *nvp;
759 zprop_list_t **last;
760 boolean_t firstexpand = (NULL == *plp);
761 int i;
763 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
764 return (-1);
766 last = plp;
767 while (*last != NULL)
768 last = &(*last)->pl_next;
770 if ((*plp)->pl_all)
771 features = zpool_get_features(zhp);
773 if ((*plp)->pl_all && firstexpand) {
774 for (i = 0; i < SPA_FEATURES; i++) {
775 zprop_list_t *entry = zfs_alloc(hdl,
776 sizeof (zprop_list_t));
777 entry->pl_prop = ZPROP_INVAL;
778 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
779 spa_feature_table[i].fi_uname);
780 entry->pl_width = strlen(entry->pl_user_prop);
781 entry->pl_all = B_TRUE;
783 *last = entry;
784 last = &entry->pl_next;
788 /* add any unsupported features */
789 for (nvp = nvlist_next_nvpair(features, NULL);
790 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
791 char *propname;
792 boolean_t found;
793 zprop_list_t *entry;
795 if (zfeature_is_supported(nvpair_name(nvp)))
796 continue;
798 propname = zfs_asprintf(hdl, "unsupported@%s",
799 nvpair_name(nvp));
802 * Before adding the property to the list make sure that no
803 * other pool already added the same property.
805 found = B_FALSE;
806 entry = *plp;
807 while (entry != NULL) {
808 if (entry->pl_user_prop != NULL &&
809 strcmp(propname, entry->pl_user_prop) == 0) {
810 found = B_TRUE;
811 break;
813 entry = entry->pl_next;
815 if (found) {
816 free(propname);
817 continue;
820 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
821 entry->pl_prop = ZPROP_INVAL;
822 entry->pl_user_prop = propname;
823 entry->pl_width = strlen(entry->pl_user_prop);
824 entry->pl_all = B_TRUE;
826 *last = entry;
827 last = &entry->pl_next;
830 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
832 if (entry->pl_fixed)
833 continue;
835 if (entry->pl_prop != ZPROP_INVAL &&
836 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
837 NULL, B_FALSE) == 0) {
838 if (strlen(buf) > entry->pl_width)
839 entry->pl_width = strlen(buf);
843 return (0);
847 * Get the state for the given feature on the given ZFS pool.
850 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
851 size_t len)
853 uint64_t refcount;
854 boolean_t found = B_FALSE;
855 nvlist_t *features = zpool_get_features(zhp);
856 boolean_t supported;
857 const char *feature = strchr(propname, '@') + 1;
859 supported = zpool_prop_feature(propname);
860 ASSERT(supported || zpool_prop_unsupported(propname));
863 * Convert from feature name to feature guid. This conversion is
864 * unnecessary for unsupported@... properties because they already
865 * use guids.
867 if (supported) {
868 int ret;
869 spa_feature_t fid;
871 ret = zfeature_lookup_name(feature, &fid);
872 if (ret != 0) {
873 (void) strlcpy(buf, "-", len);
874 return (ENOTSUP);
876 feature = spa_feature_table[fid].fi_guid;
879 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
880 found = B_TRUE;
882 if (supported) {
883 if (!found) {
884 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
885 } else {
886 if (refcount == 0)
887 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
888 else
889 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
891 } else {
892 if (found) {
893 if (refcount == 0) {
894 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
895 } else {
896 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
898 } else {
899 (void) strlcpy(buf, "-", len);
900 return (ENOTSUP);
904 return (0);
908 * Don't start the slice at the default block of 34; many storage
909 * devices will use a stripe width of 128k, while other vendors prefer a 1m
910 * alignment. It is best to play it safe and ensure a 1m alignment
911 * given 512B blocks. When the block size is larger by a power of 2
912 * we will still be 1m aligned. Some devices are sensitive to the
913 * partition ending alignment as well.
915 #define NEW_START_BLOCK 2048
916 #define PARTITION_END_ALIGNMENT 2048
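/*
 * Worked example (added for clarity, not part of the original file): with
 * 512-byte sectors, 2048 * 512 = 1,048,576 bytes, so starting the slice at
 * block 2048 and aligning its end to a multiple of 2048 blocks keeps both
 * edges on a 1 MiB boundary; larger power-of-2 sector sizes stay aligned too.
 */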
919 * Validate the given pool name, optionally setting an extended error message
920 * on 'hdl'.
922 boolean_t
923 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
925 namecheck_err_t why;
926 char what;
927 int ret;
929 ret = pool_namecheck(pool, &why, &what);
932 * The rules for reserved pool names were extended at a later point.
933 * But we need to support users with existing pools that may now be
934 * invalid. So we only check for this expanded set of names during a
935 * create (or import), and only in userland.
937 if (ret == 0 && !isopen &&
938 (strncmp(pool, "mirror", 6) == 0 ||
939 strncmp(pool, "raidz", 5) == 0 ||
940 strncmp(pool, "spare", 5) == 0 ||
941 strcmp(pool, "log") == 0)) {
942 if (hdl != NULL)
943 zfs_error_aux(hdl,
944 dgettext(TEXT_DOMAIN, "name is reserved"));
945 return (B_FALSE);
949 if (ret != 0) {
950 if (hdl != NULL) {
951 switch (why) {
952 case NAME_ERR_TOOLONG:
953 zfs_error_aux(hdl,
954 dgettext(TEXT_DOMAIN, "name is too long"));
955 break;
957 case NAME_ERR_INVALCHAR:
958 zfs_error_aux(hdl,
959 dgettext(TEXT_DOMAIN, "invalid character "
960 "'%c' in pool name"), what);
961 break;
963 case NAME_ERR_NOLETTER:
964 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
965 "name must begin with a letter"));
966 break;
968 case NAME_ERR_RESERVED:
969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
970 "name is reserved"));
971 break;
973 case NAME_ERR_DISKLIKE:
974 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
975 "pool name is reserved"));
976 break;
978 case NAME_ERR_LEADING_SLASH:
979 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
980 "leading slash in name"));
981 break;
983 case NAME_ERR_EMPTY_COMPONENT:
984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
985 "empty component in name"));
986 break;
988 case NAME_ERR_TRAILING_SLASH:
989 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
990 "trailing slash in name"));
991 break;
993 case NAME_ERR_MULTIPLE_DELIMITERS:
994 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
995 "multiple '@' and/or '#' delimiters in "
996 "name"));
997 break;
999 case NAME_ERR_NO_AT:
1000 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1001 "permission set is missing '@'"));
1002 break;
1004 default:
1005 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1006 "(%d) not defined"), why);
1007 break;
1010 return (B_FALSE);
1013 return (B_TRUE);
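/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * zpool_name_valid() as a pre-create check.  "tank" passes; "mirror" is
 * rejected as reserved when isopen is B_FALSE; "1pool" fails the
 * leading-letter rule.  A NULL hdl simply suppresses the extended error
 * message.
 */
static void
example_name_checks(void)
{
	(void) printf("%d %d %d\n",
	    zpool_name_valid(NULL, B_FALSE, "tank"),
	    zpool_name_valid(NULL, B_FALSE, "mirror"),
	    zpool_name_valid(NULL, B_FALSE, "1pool"));
}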
1017 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1018 * state.
1020 zpool_handle_t *
1021 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1023 zpool_handle_t *zhp;
1024 boolean_t missing;
1027 * Make sure the pool name is valid.
1029 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1030 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1031 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1032 pool);
1033 return (NULL);
1036 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1037 return (NULL);
1039 zhp->zpool_hdl = hdl;
1040 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1042 if (zpool_refresh_stats(zhp, &missing) != 0) {
1043 zpool_close(zhp);
1044 return (NULL);
1047 if (missing) {
1048 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1049 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1050 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1051 zpool_close(zhp);
1052 return (NULL);
1055 return (zhp);
1059 * Like the above, but silent on error. Used when iterating over pools (because
1060 * the configuration cache may be out of date).
1063 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1065 zpool_handle_t *zhp;
1066 boolean_t missing;
1068 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1069 return (-1);
1071 zhp->zpool_hdl = hdl;
1072 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1074 if (zpool_refresh_stats(zhp, &missing) != 0) {
1075 zpool_close(zhp);
1076 return (-1);
1079 if (missing) {
1080 zpool_close(zhp);
1081 *ret = NULL;
1082 return (0);
1085 *ret = zhp;
1086 return (0);
1090 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1091 * state.
1093 zpool_handle_t *
1094 zpool_open(libzfs_handle_t *hdl, const char *pool)
1096 zpool_handle_t *zhp;
1098 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1099 return (NULL);
1101 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1102 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1103 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1104 zpool_close(zhp);
1105 return (NULL);
1108 return (zhp);
1112 * Close the handle. Simply frees the memory associated with the handle.
1114 void
1115 zpool_close(zpool_handle_t *zhp)
1117 nvlist_free(zhp->zpool_config);
1118 nvlist_free(zhp->zpool_old_config);
1119 nvlist_free(zhp->zpool_props);
1120 free(zhp);
1124 * Return the name of the pool.
1126 const char *
1127 zpool_get_name(zpool_handle_t *zhp)
1129 return (zhp->zpool_name);
1134 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1137 zpool_get_state(zpool_handle_t *zhp)
1139 return (zhp->zpool_state);
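/*
 * Illustrative sketch (hypothetical helper, not part of the original file;
 * error handling elided): the usual open/use/close lifecycle around
 * zpool_open() and zpool_close(), starting from a libzfs_init() handle.
 */
static void
example_open_close(const char *pool)
{
	libzfs_handle_t *hdl = libzfs_init();
	zpool_handle_t *zhp;

	if (hdl == NULL)
		return;
	if ((zhp = zpool_open(hdl, pool)) != NULL) {
		(void) printf("state: %s\n",
		    zpool_pool_state_to_name(zpool_get_state(zhp)));
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
}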
1143 * Create the named pool, using the provided vdev list. It is assumed
1144 * that the consumer has already validated the contents of the nvlist, so we
1145 * don't have to worry about error semantics.
1148 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1149 nvlist_t *props, nvlist_t *fsprops)
1151 zfs_cmd_t zc = {"\0"};
1152 nvlist_t *zc_fsprops = NULL;
1153 nvlist_t *zc_props = NULL;
1154 char msg[1024];
1155 int ret = -1;
1157 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1158 "cannot create '%s'"), pool);
1160 if (!zpool_name_valid(hdl, B_FALSE, pool))
1161 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1163 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1164 return (-1);
1166 if (props) {
1167 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1169 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1170 SPA_VERSION_1, flags, msg)) == NULL) {
1171 goto create_failed;
1175 if (fsprops) {
1176 uint64_t zoned;
1177 char *zonestr;
1179 zoned = ((nvlist_lookup_string(fsprops,
1180 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1181 strcmp(zonestr, "on") == 0);
1183 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1184 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1185 goto create_failed;
1187 if (!zc_props &&
1188 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1189 goto create_failed;
1191 if (nvlist_add_nvlist(zc_props,
1192 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1193 goto create_failed;
1197 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1198 goto create_failed;
1200 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1202 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1204 zcmd_free_nvlists(&zc);
1205 nvlist_free(zc_props);
1206 nvlist_free(zc_fsprops);
1208 switch (errno) {
1209 case EBUSY:
1211 * This can happen if the user has specified the same
1212 * device multiple times. We can't reliably detect this
1213 * until we try to add it and see we already have a
1214 * label. This can also happen if the device is
1215 * part of an active md or lvm device.
1217 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1218 "one or more vdevs refer to the same device, or "
1219 "one of\nthe devices is part of an active md or "
1220 "lvm device"));
1221 return (zfs_error(hdl, EZFS_BADDEV, msg));
1223 case ERANGE:
1225 * This happens if the record size is smaller or larger
1226 * than the allowed size range, or not a power of 2.
1228 * NOTE: although zfs_valid_proplist is called earlier,
1229 * this case may have slipped through since the
1230 * pool does not exist yet and it is therefore
1231 * impossible to read properties e.g. max blocksize
1232 * from the pool.
1234 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1235 "record size invalid"));
1236 return (zfs_error(hdl, EZFS_BADPROP, msg));
1238 case EOVERFLOW:
1240 * This occurs when one of the devices is below
1241 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1242 * device was the problem device since there's no
1243 * reliable way to determine device size from userland.
1246 char buf[64];
1248 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1249 sizeof (buf));
1251 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1252 "one or more devices is less than the "
1253 "minimum size (%s)"), buf);
1255 return (zfs_error(hdl, EZFS_BADDEV, msg));
1257 case ENOSPC:
1258 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1259 "one or more devices is out of space"));
1260 return (zfs_error(hdl, EZFS_BADDEV, msg));
1262 case ENOTBLK:
1263 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1264 "cache device must be a disk or disk slice"));
1265 return (zfs_error(hdl, EZFS_BADDEV, msg));
1267 default:
1268 return (zpool_standard_error(hdl, errno, msg));
1272 create_failed:
1273 zcmd_free_nvlists(&zc);
1274 nvlist_free(zc_props);
1275 nvlist_free(zc_fsprops);
1276 return (ret);
1280 * Destroy the given pool. It is up to the caller to ensure that there are no
1281 * datasets left in the pool.
1284 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1286 zfs_cmd_t zc = {"\0"};
1287 zfs_handle_t *zfp = NULL;
1288 libzfs_handle_t *hdl = zhp->zpool_hdl;
1289 char msg[1024];
1291 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1292 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1293 return (-1);
1295 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1296 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1298 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1299 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1300 "cannot destroy '%s'"), zhp->zpool_name);
1302 if (errno == EROFS) {
1303 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1304 "one or more devices is read only"));
1305 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1306 } else {
1307 (void) zpool_standard_error(hdl, errno, msg);
1310 if (zfp)
1311 zfs_close(zfp);
1312 return (-1);
1315 if (zfp) {
1316 remove_mountpoint(zfp);
1317 zfs_close(zfp);
1320 return (0);
1324 * Add the given vdevs to the pool. The caller must have already performed the
1325 * necessary verification to ensure that the vdev specification is well-formed.
1328 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1330 zfs_cmd_t zc = {"\0"};
1331 int ret;
1332 libzfs_handle_t *hdl = zhp->zpool_hdl;
1333 char msg[1024];
1334 nvlist_t **spares, **l2cache;
1335 uint_t nspares, nl2cache;
1337 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1338 "cannot add to '%s'"), zhp->zpool_name);
1340 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1341 SPA_VERSION_SPARES &&
1342 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1343 &spares, &nspares) == 0) {
1344 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1345 "upgraded to add hot spares"));
1346 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1349 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1350 SPA_VERSION_L2CACHE &&
1351 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1352 &l2cache, &nl2cache) == 0) {
1353 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1354 "upgraded to add cache devices"));
1355 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1358 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1359 return (-1);
1360 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1362 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1363 switch (errno) {
1364 case EBUSY:
1366 * This can happen if the user has specified the same
1367 * device multiple times. We can't reliably detect this
1368 * until we try to add it and see we already have a
1369 * label.
1371 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1372 "one or more vdevs refer to the same device"));
1373 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1374 break;
1376 case EOVERFLOW:
1378 * This occurs when one of the devices is below
1379 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1380 * device was the problem device since there's no
1381 * reliable way to determine device size from userland.
1384 char buf[64];
1386 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1387 sizeof (buf));
1389 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1390 "device is less than the minimum "
1391 "size (%s)"), buf);
1393 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1394 break;
1396 case ENOTSUP:
1397 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1398 "pool must be upgraded to add these vdevs"));
1399 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1400 break;
1402 case ENOTBLK:
1403 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1404 "cache device must be a disk or disk slice"));
1405 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1406 break;
1408 default:
1409 (void) zpool_standard_error(hdl, errno, msg);
1412 ret = -1;
1413 } else {
1414 ret = 0;
1417 zcmd_free_nvlists(&zc);
1419 return (ret);
1423 * Exports the pool from the system. The caller must ensure that there are no
1424 * mounted datasets in the pool.
1426 static int
1427 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1428 const char *log_str)
1430 zfs_cmd_t zc = {"\0"};
1431 char msg[1024];
1433 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1434 "cannot export '%s'"), zhp->zpool_name);
1436 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1437 zc.zc_cookie = force;
1438 zc.zc_guid = hardforce;
1439 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1441 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1442 switch (errno) {
1443 case EXDEV:
1444 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1445 "use '-f' to override the following errors:\n"
1446 "'%s' has an active shared spare which could be"
1447 " used by other pools once '%s' is exported."),
1448 zhp->zpool_name, zhp->zpool_name);
1449 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1450 msg));
1451 default:
1452 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1453 msg));
1457 return (0);
1461 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1463 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1467 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1469 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1472 static void
1473 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1474 nvlist_t *config)
1476 nvlist_t *nv = NULL;
1477 uint64_t rewindto;
1478 int64_t loss = -1;
1479 struct tm t;
1480 char timestr[128];
1482 if (!hdl->libzfs_printerr || config == NULL)
1483 return;
1485 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1486 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1487 return;
1490 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1491 return;
1492 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1494 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1495 strftime(timestr, 128, "%c", &t) != 0) {
1496 if (dryrun) {
1497 (void) printf(dgettext(TEXT_DOMAIN,
1498 "Would be able to return %s "
1499 "to its state as of %s.\n"),
1500 name, timestr);
1501 } else {
1502 (void) printf(dgettext(TEXT_DOMAIN,
1503 "Pool %s returned to its state as of %s.\n"),
1504 name, timestr);
1506 if (loss > 120) {
1507 (void) printf(dgettext(TEXT_DOMAIN,
1508 "%s approximately %lld "),
1509 dryrun ? "Would discard" : "Discarded",
1510 ((longlong_t)loss + 30) / 60);
1511 (void) printf(dgettext(TEXT_DOMAIN,
1512 "minutes of transactions.\n"));
1513 } else if (loss > 0) {
1514 (void) printf(dgettext(TEXT_DOMAIN,
1515 "%s approximately %lld "),
1516 dryrun ? "Would discard" : "Discarded",
1517 (longlong_t)loss);
1518 (void) printf(dgettext(TEXT_DOMAIN,
1519 "seconds of transactions.\n"));
1524 void
1525 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1526 nvlist_t *config)
1528 nvlist_t *nv = NULL;
1529 int64_t loss = -1;
1530 uint64_t edata = UINT64_MAX;
1531 uint64_t rewindto;
1532 struct tm t;
1533 char timestr[128];
1535 if (!hdl->libzfs_printerr)
1536 return;
1538 if (reason >= 0)
1539 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1540 else
1541 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1543 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1544 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1545 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1546 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1547 goto no_info;
1549 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1550 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1551 &edata);
1553 (void) printf(dgettext(TEXT_DOMAIN,
1554 "Recovery is possible, but will result in some data loss.\n"));
1556 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1557 strftime(timestr, 128, "%c", &t) != 0) {
1558 (void) printf(dgettext(TEXT_DOMAIN,
1559 "\tReturning the pool to its state as of %s\n"
1560 "\tshould correct the problem. "),
1561 timestr);
1562 } else {
1563 (void) printf(dgettext(TEXT_DOMAIN,
1564 "\tReverting the pool to an earlier state "
1565 "should correct the problem.\n\t"));
1568 if (loss > 120) {
1569 (void) printf(dgettext(TEXT_DOMAIN,
1570 "Approximately %lld minutes of data\n"
1571 "\tmust be discarded, irreversibly. "),
1572 ((longlong_t)loss + 30) / 60);
1573 } else if (loss > 0) {
1574 (void) printf(dgettext(TEXT_DOMAIN,
1575 "Approximately %lld seconds of data\n"
1576 "\tmust be discarded, irreversibly. "),
1577 (longlong_t)loss);
1579 if (edata != 0 && edata != UINT64_MAX) {
1580 if (edata == 1) {
1581 (void) printf(dgettext(TEXT_DOMAIN,
1582 "After rewind, at least\n"
1583 "\tone persistent user-data error will remain. "));
1584 } else {
1585 (void) printf(dgettext(TEXT_DOMAIN,
1586 "After rewind, several\n"
1587 "\tpersistent user-data errors will remain. "));
1590 (void) printf(dgettext(TEXT_DOMAIN,
1591 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1592 reason >= 0 ? "clear" : "import", name);
1594 (void) printf(dgettext(TEXT_DOMAIN,
1595 "A scrub of the pool\n"
1596 "\tis strongly recommended after recovery.\n"));
1597 return;
1599 no_info:
1600 (void) printf(dgettext(TEXT_DOMAIN,
1601 "Destroy and re-create the pool from\n\ta backup source.\n"));
1605 * zpool_import() is a contracted interface. Should be kept the same
1606 * if possible.
1608 * Applications should use zpool_import_props() to import a pool with
1609 * new property values to be set.
1612 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1613 char *altroot)
1615 nvlist_t *props = NULL;
1616 int ret;
1618 if (altroot != NULL) {
1619 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1620 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1621 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1622 newname));
1625 if (nvlist_add_string(props,
1626 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1627 nvlist_add_string(props,
1628 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1629 nvlist_free(props);
1630 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1631 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1632 newname));
1636 ret = zpool_import_props(hdl, config, newname, props,
1637 ZFS_IMPORT_NORMAL);
1638 nvlist_free(props);
1639 return (ret);
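/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * importing a discovered config under an alternate root.  The config nvlist
 * is assumed to come from zpool_find_import(); zpool_import() translates the
 * altroot into the equivalent property list for zpool_import_props().
 */
static int
example_import(libzfs_handle_t *hdl, nvlist_t *config)
{
	char altroot[] = "/mnt/recovery";

	return (zpool_import(hdl, config, NULL, altroot));
}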
1642 static void
1643 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1644 int indent)
1646 nvlist_t **child;
1647 uint_t c, children;
1648 char *vname;
1649 uint64_t is_log = 0;
1651 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1652 &is_log);
1654 if (name != NULL)
1655 (void) printf("\t%*s%s%s\n", indent, "", name,
1656 is_log ? " [log]" : "");
1658 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1659 &child, &children) != 0)
1660 return;
1662 for (c = 0; c < children; c++) {
1663 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1664 print_vdev_tree(hdl, vname, child[c], indent + 2);
1665 free(vname);
1669 void
1670 zpool_print_unsup_feat(nvlist_t *config)
1672 nvlist_t *nvinfo, *unsup_feat;
1673 nvpair_t *nvp;
1675 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1677 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1678 &unsup_feat) == 0);
1680 for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1681 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1682 char *desc;
1684 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1685 verify(nvpair_value_string(nvp, &desc) == 0);
1687 if (strlen(desc) > 0)
1688 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1689 else
1690 (void) printf("\t%s\n", nvpair_name(nvp));
1695 * Import the given pool using the known configuration and a list of
1696 * properties to be set. The configuration should have come from
1697 * zpool_find_import(). The 'newname' parameter controls whether the pool
1698 * is imported with a different name.
1701 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1702 nvlist_t *props, int flags)
1704 zfs_cmd_t zc = {"\0"};
1705 zpool_rewind_policy_t policy;
1706 nvlist_t *nv = NULL;
1707 nvlist_t *nvinfo = NULL;
1708 nvlist_t *missing = NULL;
1709 char *thename;
1710 char *origname;
1711 int ret;
1712 int error = 0;
1713 char errbuf[1024];
1715 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1716 &origname) == 0);
1718 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1719 "cannot import pool '%s'"), origname);
1721 if (newname != NULL) {
1722 if (!zpool_name_valid(hdl, B_FALSE, newname))
1723 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1724 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1725 newname));
1726 thename = (char *)newname;
1727 } else {
1728 thename = origname;
1731 if (props != NULL) {
1732 uint64_t version;
1733 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1735 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1736 &version) == 0);
1738 if ((props = zpool_valid_proplist(hdl, origname,
1739 props, version, flags, errbuf)) == NULL)
1740 return (-1);
1741 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1742 nvlist_free(props);
1743 return (-1);
1745 nvlist_free(props);
1748 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1750 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1751 &zc.zc_guid) == 0);
1753 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1754 zcmd_free_nvlists(&zc);
1755 return (-1);
1757 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1758 zcmd_free_nvlists(&zc);
1759 return (-1);
1762 zc.zc_cookie = flags;
1763 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1764 errno == ENOMEM) {
1765 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1766 zcmd_free_nvlists(&zc);
1767 return (-1);
1770 if (ret != 0)
1771 error = errno;
1773 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1775 zcmd_free_nvlists(&zc);
1777 zpool_get_rewind_policy(config, &policy);
1779 if (error) {
1780 char desc[1024];
1783 * Dry-run failed, but we print out what success
1784 * looks like if we found a best txg
1786 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1787 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1788 B_TRUE, nv);
1789 nvlist_free(nv);
1790 return (-1);
1793 if (newname == NULL)
1794 (void) snprintf(desc, sizeof (desc),
1795 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1796 thename);
1797 else
1798 (void) snprintf(desc, sizeof (desc),
1799 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1800 origname, thename);
1802 switch (error) {
1803 case ENOTSUP:
1804 if (nv != NULL && nvlist_lookup_nvlist(nv,
1805 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1806 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1807 (void) printf(dgettext(TEXT_DOMAIN, "This "
1808 "pool uses the following feature(s) not "
1809 "supported by this system:\n"));
1810 zpool_print_unsup_feat(nv);
1811 if (nvlist_exists(nvinfo,
1812 ZPOOL_CONFIG_CAN_RDONLY)) {
1813 (void) printf(dgettext(TEXT_DOMAIN,
1814 "All unsupported features are only "
1815 "required for writing to the pool."
1816 "\nThe pool can be imported using "
1817 "'-o readonly=on'.\n"));
1821 * Unsupported version.
1823 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1824 break;
1826 case EINVAL:
1827 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1828 break;
1830 case EROFS:
1831 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1832 "one or more devices is read only"));
1833 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1834 break;
1836 case ENXIO:
1837 if (nv && nvlist_lookup_nvlist(nv,
1838 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1839 nvlist_lookup_nvlist(nvinfo,
1840 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1841 (void) printf(dgettext(TEXT_DOMAIN,
1842 "The devices below are missing, use "
1843 "'-m' to import the pool anyway:\n"));
1844 print_vdev_tree(hdl, NULL, missing, 2);
1845 (void) printf("\n");
1847 (void) zpool_standard_error(hdl, error, desc);
1848 break;
1850 case EEXIST:
1851 (void) zpool_standard_error(hdl, error, desc);
1852 break;
1854 case EBUSY:
1855 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1856 "one or more devices are already in use\n"));
1857 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1858 break;
1859 case ENAMETOOLONG:
1860 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1861 "new name of at least one dataset is longer than "
1862 "the maximum allowable length"));
1863 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1864 break;
1865 default:
1866 (void) zpool_standard_error(hdl, error, desc);
1867 zpool_explain_recover(hdl,
1868 newname ? origname : thename, -error, nv);
1869 break;
1872 nvlist_free(nv);
1873 ret = -1;
1874 } else {
1875 zpool_handle_t *zhp;
1878 * This should never fail, but play it safe anyway.
1880 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1881 ret = -1;
1882 else if (zhp != NULL)
1883 zpool_close(zhp);
1884 if (policy.zrp_request &
1885 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1886 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1887 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1889 nvlist_free(nv);
1890 return (0);
1893 return (ret);
1897 * Scan the pool.
1900 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1902 zfs_cmd_t zc = {"\0"};
1903 char msg[1024];
1904 libzfs_handle_t *hdl = zhp->zpool_hdl;
1906 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1907 zc.zc_cookie = func;
1909 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1910 (errno == ENOENT && func != POOL_SCAN_NONE))
1911 return (0);
1913 if (func == POOL_SCAN_SCRUB) {
1914 (void) snprintf(msg, sizeof (msg),
1915 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1916 } else if (func == POOL_SCAN_NONE) {
1917 (void) snprintf(msg, sizeof (msg),
1918 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1919 zc.zc_name);
1920 } else {
1921 assert(!"unexpected result");
1924 if (errno == EBUSY) {
1925 nvlist_t *nvroot;
1926 pool_scan_stat_t *ps = NULL;
1927 uint_t psc;
1929 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1930 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1931 (void) nvlist_lookup_uint64_array(nvroot,
1932 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1933 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1934 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1935 else
1936 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1937 } else if (errno == ENOENT) {
1938 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1939 } else {
1940 return (zpool_standard_error(hdl, errno, msg));
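/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * starting a scrub with zpool_scan() and cancelling it again with
 * POOL_SCAN_NONE.
 */
static void
example_scrub(zpool_handle_t *zhp)
{
	if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
		return;
	/* ... later, to cancel the scrub: */
	(void) zpool_scan(zhp, POOL_SCAN_NONE);
}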
1945 * Find a vdev that matches the search criteria specified. We use the
1946 * nvpair name to determine how we should look for the device.
1947 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1948 * spare, but FALSE if it's an INUSE spare.
1950 static nvlist_t *
1951 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1952 boolean_t *l2cache, boolean_t *log)
1954 uint_t c, children;
1955 nvlist_t **child;
1956 nvlist_t *ret;
1957 uint64_t is_log;
1958 char *srchkey;
1959 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1961 /* Nothing to look for */
1962 if (search == NULL || pair == NULL)
1963 return (NULL);
1965 /* Obtain the key we will use to search */
1966 srchkey = nvpair_name(pair);
1968 switch (nvpair_type(pair)) {
1969 case DATA_TYPE_UINT64:
1970 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1971 uint64_t srchval, theguid;
1973 verify(nvpair_value_uint64(pair, &srchval) == 0);
1974 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1975 &theguid) == 0);
1976 if (theguid == srchval)
1977 return (nv);
1979 break;
1981 case DATA_TYPE_STRING: {
1982 char *srchval, *val;
1984 verify(nvpair_value_string(pair, &srchval) == 0);
1985 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1986 break;
1989 * Search for the requested value. Special cases:
1991 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1992 * "-part1", or "p1". The suffix is hidden from the user,
1993 * but included in the string, so this matches around it.
1994 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
1995 * is used to check all possible expanded paths.
1996 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1998 * Otherwise, all other searches are simple string compares.
2000 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2001 uint64_t wholedisk = 0;
2003 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2004 &wholedisk);
2005 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2006 return (nv);
2008 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2009 char *type, *idx, *end, *p;
2010 uint64_t id, vdev_id;
2013 * Determine our vdev type, keeping in mind
2014 * that the srchval is composed of a type and
2015 * vdev id pair (i.e. mirror-4).
2017 if ((type = strdup(srchval)) == NULL)
2018 return (NULL);
2020 if ((p = strrchr(type, '-')) == NULL) {
2021 free(type);
2022 break;
2024 idx = p + 1;
2025 *p = '\0';
2028 * If the types don't match then keep looking.
2030 if (strncmp(val, type, strlen(val)) != 0) {
2031 free(type);
2032 break;
2035 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2036 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2037 strncmp(type, VDEV_TYPE_MIRROR,
2038 strlen(VDEV_TYPE_MIRROR)) == 0);
2039 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2040 &id) == 0);
2042 errno = 0;
2043 vdev_id = strtoull(idx, &end, 10);
2045 free(type);
2046 if (errno != 0)
2047 return (NULL);
2050 * Now verify that we have the correct vdev id.
2052 if (vdev_id == id)
2053 return (nv);
2057 * Common case
2059 if (strcmp(srchval, val) == 0)
2060 return (nv);
2061 break;
2064 default:
2065 break;
2068 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2069 &child, &children) != 0)
2070 return (NULL);
2072 for (c = 0; c < children; c++) {
2073 if ((ret = vdev_to_nvlist_iter(child[c], search,
2074 avail_spare, l2cache, NULL)) != NULL) {
2076 * The 'is_log' value is only set for the toplevel
2077 * vdev, not the leaf vdevs. So we always lookup the
2078 * log device from the root of the vdev tree (where
2079 * 'log' is non-NULL).
2081 if (log != NULL &&
2082 nvlist_lookup_uint64(child[c],
2083 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2084 is_log) {
2085 *log = B_TRUE;
2087 return (ret);
2091 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2092 &child, &children) == 0) {
2093 for (c = 0; c < children; c++) {
2094 if ((ret = vdev_to_nvlist_iter(child[c], search,
2095 avail_spare, l2cache, NULL)) != NULL) {
2096 *avail_spare = B_TRUE;
2097 return (ret);
2102 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2103 &child, &children) == 0) {
2104 for (c = 0; c < children; c++) {
2105 if ((ret = vdev_to_nvlist_iter(child[c], search,
2106 avail_spare, l2cache, NULL)) != NULL) {
2107 *l2cache = B_TRUE;
2108 return (ret);
2113 return (NULL);
2117 * Given a physical path (minus the "/devices" prefix), find the
2118 * associated vdev.
2120 nvlist_t *
2121 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2122 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2124 nvlist_t *search, *nvroot, *ret;
2126 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2127 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2129 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2130 &nvroot) == 0);
2132 *avail_spare = B_FALSE;
2133 *l2cache = B_FALSE;
2134 if (log != NULL)
2135 *log = B_FALSE;
2136 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2137 nvlist_free(search);
2139 return (ret);
2143 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2145 boolean_t
2146 zpool_vdev_is_interior(const char *name)
2148 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2149 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2150 return (B_TRUE);
2151 return (B_FALSE);
2154 nvlist_t *
2155 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2156 boolean_t *l2cache, boolean_t *log)
2158 char *end;
2159 nvlist_t *nvroot, *search, *ret;
2160 uint64_t guid;
2162 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2164 guid = strtoull(path, &end, 0);
2165 if (guid != 0 && *end == '\0') {
2166 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2167 } else if (zpool_vdev_is_interior(path)) {
2168 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2169 } else {
2170 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2173 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2174 &nvroot) == 0);
2176 *avail_spare = B_FALSE;
2177 *l2cache = B_FALSE;
2178 if (log != NULL)
2179 *log = B_FALSE;
2180 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2181 nvlist_free(search);
2183 return (ret);
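/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * resolving a device name to its config nvlist with zpool_find_vdev().  The
 * name may be a guid, a device path, or an interior vdev name such as
 * "mirror-0".
 */
static nvlist_t *
example_lookup_vdev(zpool_handle_t *zhp, const char *name)
{
	boolean_t spare, l2cache, log;

	return (zpool_find_vdev(zhp, name, &spare, &l2cache, &log));
}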
2186 static int
2187 vdev_online(nvlist_t *nv)
2189 uint64_t ival;
2191 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2192 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2193 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2194 return (0);
2196 return (1);
2200 * Helper function for zpool_get_physpath().
2202 static int
2203 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2204 size_t *bytes_written)
2206 size_t bytes_left, pos, rsz;
2207 char *tmppath;
2208 const char *format;
2210 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2211 &tmppath) != 0)
2212 return (EZFS_NODEVICE);
2214 pos = *bytes_written;
2215 bytes_left = physpath_size - pos;
2216 format = (pos == 0) ? "%s" : " %s";
2218 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2219 *bytes_written += rsz;
2221 if (rsz >= bytes_left) {
2222 /* if physpath was not copied properly, clear it */
2223 if (bytes_left != 0) {
2224 physpath[pos] = 0;
2226 return (EZFS_NOSPC);
2228 return (0);
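/*
 * The truncation check above leans on the C99 snprintf() contract:
 * the return value is the length the string would have had, so any
 * result >= the available space means the copy was cut short. The
 * same pattern in isolation (a sketch, not library code):
 *
 *	char buf[8];
 *	size_t rsz = snprintf(buf, sizeof (buf), "%s", "a-long-path");
 *	if (rsz >= sizeof (buf))
 *		buf[0] = '\0';
 */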
2231 static int
2232 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2233 size_t *rsz, boolean_t is_spare)
2235 char *type;
2236 int ret;
2238 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2239 return (EZFS_INVALCONFIG);
2241 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2243 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2244 * For a spare vdev, we only want to boot from the active
2245 * spare device.
2247 if (is_spare) {
2248 uint64_t spare = 0;
2249 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2250 &spare);
2251 if (!spare)
2252 return (EZFS_INVALCONFIG);
2255 if (vdev_online(nv)) {
2256 if ((ret = vdev_get_one_physpath(nv, physpath,
2257 phypath_size, rsz)) != 0)
2258 return (ret);
2260 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2261 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2262 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2263 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2264 nvlist_t **child;
2265 uint_t count;
2266 int i, ret;
2268 if (nvlist_lookup_nvlist_array(nv,
2269 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2270 return (EZFS_INVALCONFIG);
2272 for (i = 0; i < count; i++) {
2273 ret = vdev_get_physpaths(child[i], physpath,
2274 phypath_size, rsz, is_spare);
2275 if (ret == EZFS_NOSPC)
2276 return (ret);
2280 return (EZFS_POOL_INVALARG);
2284 * Get phys_path for a root pool config.
2285 * Return 0 on success; non-zero on failure.
2287 static int
2288 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2290 size_t rsz;
2291 nvlist_t *vdev_root;
2292 nvlist_t **child;
2293 uint_t count;
2294 char *type;
2296 rsz = 0;
2298 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2299 &vdev_root) != 0)
2300 return (EZFS_INVALCONFIG);
2302 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2303 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2304 &child, &count) != 0)
2305 return (EZFS_INVALCONFIG);
2308 * A root pool can only have a single top-level vdev.
2310 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2311 return (EZFS_POOL_INVALARG);
2313 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2314 B_FALSE);
2316 /* No online devices */
2317 if (rsz == 0)
2318 return (EZFS_NODEVICE);
2320 return (0);
2324 * Get phys_path for a root pool
2325 * Return 0 on success; non-zero on failure.
2328 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2330 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2331 phypath_size));
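/*
 * Caller sketch (illustrative): fetch the space-separated list of
 * physical paths for a bootable pool's online disks, assuming 'zhp'
 * is an open handle on a root pool.
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("boot paths: %s\n", physpath);
 */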
2335 * If the device has been dynamically expanded, then we need to relabel
2336 * the disk to use the new unallocated space.
2338 static int
2339 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2341 int fd, error;
2343 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2344 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2345 "relabel '%s': unable to open device: %d"), path, errno);
2346 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2350 * It's possible that we might encounter an error if the device
2351 * does not have any unallocated space left. If so, we simply
2352 * ignore that error and continue on.
2354 * Also, we don't call efi_rescan() - that would just return EBUSY.
2355 * The module will do it for us in vdev_disk_open().
2357 error = efi_use_whole_disk(fd);
2359 /* Flush the buffers to disk and invalidate the page cache. */
2360 (void) fsync(fd);
2361 (void) ioctl(fd, BLKFLSBUF);
2363 (void) close(fd);
2364 if (error && error != VT_ENOSPC) {
2365 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2366 "relabel '%s': unable to read disk capacity"), path);
2367 return (zfs_error(hdl, EZFS_NOCAP, msg));
2370 return (0);
2374 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
2376 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
2377 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
2378 * ignore them.
2380 static uint64_t
2381 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
2382 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
2384 uint64_t guid;
2385 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
2386 nvlist_t *tgt;
2388 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
2389 &log)) == NULL)
2390 return (0);
2392 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
2393 if (is_spare != NULL)
2394 *is_spare = spare;
2395 if (is_l2cache != NULL)
2396 *is_l2cache = l2cache;
2397 if (is_log != NULL)
2398 *is_log = log;
2400 return (guid);
2403 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
2404 uint64_t
2405 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
2407 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
2411 * Bring the specified vdev online. The 'flags' parameter is a set of the
2412 * ZFS_ONLINE_* flags.
2415 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2416 vdev_state_t *newstate)
2418 zfs_cmd_t zc = {"\0"};
2419 char msg[1024];
2420 nvlist_t *tgt;
2421 boolean_t avail_spare, l2cache, islog;
2422 libzfs_handle_t *hdl = zhp->zpool_hdl;
2423 int error;
2425 if (flags & ZFS_ONLINE_EXPAND) {
2426 (void) snprintf(msg, sizeof (msg),
2427 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2428 } else {
2429 (void) snprintf(msg, sizeof (msg),
2430 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2433 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2434 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2435 &islog)) == NULL)
2436 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2438 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2440 if (avail_spare)
2441 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2443 if (flags & ZFS_ONLINE_EXPAND ||
2444 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2445 uint64_t wholedisk = 0;
2447 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2448 &wholedisk);
2451 * XXX - L2ARC 1.0 devices can't support expansion.
2453 if (l2cache) {
2454 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2455 "cannot expand cache devices"));
2456 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2459 if (wholedisk) {
2460 const char *fullpath = path;
2461 char buf[MAXPATHLEN];
2463 if (path[0] != '/') {
2464 error = zfs_resolve_shortname(path, buf,
2465 sizeof (buf));
2466 if (error != 0)
2467 return (zfs_error(hdl, EZFS_NODEVICE,
2468 msg));
2470 fullpath = buf;
2473 error = zpool_relabel_disk(hdl, fullpath, msg);
2474 if (error != 0)
2475 return (error);
2479 zc.zc_cookie = VDEV_STATE_ONLINE;
2480 zc.zc_obj = flags;
2482 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2483 if (errno == EINVAL) {
2484 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2485 "from this pool into a new one. Use '%s' "
2486 "instead"), "zpool detach");
2487 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2489 return (zpool_standard_error(hdl, errno, msg));
2492 *newstate = zc.zc_cookie;
2493 return (0);
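/*
 * Caller sketch (illustrative): bring a previously offlined device
 * back online and ask for any newly grown capacity to be used,
 * assuming 'zhp' is an open pool handle and "sda" names a member.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0)
 *		(void) printf("state %llu\n", (u_longlong_t)newstate);
 */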
2497 * Take the specified vdev offline
2500 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2502 zfs_cmd_t zc = {"\0"};
2503 char msg[1024];
2504 nvlist_t *tgt;
2505 boolean_t avail_spare, l2cache;
2506 libzfs_handle_t *hdl = zhp->zpool_hdl;
2508 (void) snprintf(msg, sizeof (msg),
2509 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2511 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2512 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2513 NULL)) == NULL)
2514 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2516 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2518 if (avail_spare)
2519 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2521 zc.zc_cookie = VDEV_STATE_OFFLINE;
2522 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2524 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2525 return (0);
2527 switch (errno) {
2528 case EBUSY:
2531 * There are no other replicas of this device.
2533 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2535 case EEXIST:
2537 * The log device has unplayed logs
2539 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2541 default:
2542 return (zpool_standard_error(hdl, errno, msg));
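/*
 * Caller sketch (illustrative): take a device offline only until the
 * next import (istmp == B_TRUE). On failure the library has already
 * emitted a "cannot offline ..." message through zfs_error().
 *
 *	if (zpool_vdev_offline(zhp, "sda", B_TRUE) == 0)
 *		(void) printf("sda temporarily offlined\n");
 */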
2547 * Mark the given vdev faulted.
2550 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2552 zfs_cmd_t zc = {"\0"};
2553 char msg[1024];
2554 libzfs_handle_t *hdl = zhp->zpool_hdl;
2556 (void) snprintf(msg, sizeof (msg),
2557 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2559 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2560 zc.zc_guid = guid;
2561 zc.zc_cookie = VDEV_STATE_FAULTED;
2562 zc.zc_obj = aux;
2564 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2565 return (0);
2567 switch (errno) {
2568 case EBUSY:
2571 * There are no other replicas of this device.
2573 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2575 default:
2576 return (zpool_standard_error(hdl, errno, msg));
2582 * Mark the given vdev degraded.
2585 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2587 zfs_cmd_t zc = {"\0"};
2588 char msg[1024];
2589 libzfs_handle_t *hdl = zhp->zpool_hdl;
2591 (void) snprintf(msg, sizeof (msg),
2592 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2594 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2595 zc.zc_guid = guid;
2596 zc.zc_cookie = VDEV_STATE_DEGRADED;
2597 zc.zc_obj = aux;
2599 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2600 return (0);
2602 return (zpool_standard_error(hdl, errno, msg));
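/*
 * Unlike the path-based online/offline calls above, the fault and
 * degrade entry points take a vdev GUID, since they are driven mostly
 * by fmd/zed rather than by a user-supplied name. A hypothetical
 * caller can pair them with zpool_vdev_path_to_guid():
 *
 *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "sda");
 *
 *	if (guid != 0)
 *		(void) zpool_vdev_fault(zhp, guid, VDEV_AUX_EXTERNAL);
 *
 * VDEV_AUX_EXTERNAL marks the fault as externally requested rather
 * than diagnosed by the kernel itself.
 */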
2606 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2607 * a hot spare.
2609 static boolean_t
2610 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2612 nvlist_t **child;
2613 uint_t c, children;
2614 char *type;
2616 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2617 &children) == 0) {
2618 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2619 &type) == 0);
2621 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2622 children == 2 && child[which] == tgt)
2623 return (B_TRUE);
2625 for (c = 0; c < children; c++)
2626 if (is_replacing_spare(child[c], tgt, which))
2627 return (B_TRUE);
2630 return (B_FALSE);
2634 * Attach new_disk (fully described by nvroot) to old_disk.
2635 * If 'replacing' is specified, the new disk will replace the old one.
2638 zpool_vdev_attach(zpool_handle_t *zhp,
2639 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2641 zfs_cmd_t zc = {"\0"};
2642 char msg[1024];
2643 int ret;
2644 nvlist_t *tgt;
2645 boolean_t avail_spare, l2cache, islog;
2646 uint64_t val;
2647 char *newname;
2648 nvlist_t **child;
2649 uint_t children;
2650 nvlist_t *config_root;
2651 libzfs_handle_t *hdl = zhp->zpool_hdl;
2652 boolean_t rootpool = zpool_is_bootable(zhp);
2654 if (replacing)
2655 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2656 "cannot replace %s with %s"), old_disk, new_disk);
2657 else
2658 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2659 "cannot attach %s to %s"), new_disk, old_disk);
2661 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2662 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2663 &islog)) == NULL)
2664 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2666 if (avail_spare)
2667 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2669 if (l2cache)
2670 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2672 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2673 zc.zc_cookie = replacing;
2675 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2676 &child, &children) != 0 || children != 1) {
2677 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2678 "new device must be a single disk"));
2679 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2682 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2683 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2685 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2686 return (-1);
2689 * If the target is a hot spare that has been swapped in, we can only
2690 * replace it with another hot spare.
2692 if (replacing &&
2693 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2694 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2695 NULL) == NULL || !avail_spare) &&
2696 is_replacing_spare(config_root, tgt, 1)) {
2697 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2698 "can only be replaced by another hot spare"));
2699 free(newname);
2700 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2703 free(newname);
2705 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2706 return (-1);
2708 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2710 zcmd_free_nvlists(&zc);
2712 if (ret == 0) {
2713 if (rootpool) {
2715 * XXX need a better way to prevent user from
2716 * booting up a half-baked vdev.
2718 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2719 "sure to wait until resilver is done "
2720 "before rebooting.\n"));
2722 return (0);
2725 switch (errno) {
2726 case ENOTSUP:
2728 * Can't attach to or replace this type of vdev.
2730 if (replacing) {
2731 uint64_t version = zpool_get_prop_int(zhp,
2732 ZPOOL_PROP_VERSION, NULL);
2734 if (islog)
2735 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2736 "cannot replace a log with a spare"));
2737 else if (version >= SPA_VERSION_MULTI_REPLACE)
2738 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2739 "already in replacing/spare config; wait "
2740 "for completion or use 'zpool detach'"));
2741 else
2742 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2743 "cannot replace a replacing device"));
2744 } else {
2745 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2746 "can only attach to mirrors and top-level "
2747 "disks"));
2749 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2750 break;
2752 case EINVAL:
2754 * The new device must be a single disk.
2756 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2757 "new device must be a single disk"));
2758 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2759 break;
2761 case EBUSY:
2762 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2763 new_disk);
2764 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2765 break;
2767 case EOVERFLOW:
2769 * The new device is too small.
2771 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2772 "device is too small"));
2773 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2774 break;
2776 case EDOM:
2778 * The new device has a different optimal sector size.
2780 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2781 "new device has a different optimal sector size; use the "
2782 "option '-o ashift=N' to override the optimal size"));
2783 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2784 break;
2786 case ENAMETOOLONG:
2788 * The resulting top-level vdev spec won't fit in the label.
2790 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2791 break;
2793 default:
2794 (void) zpool_standard_error(hdl, errno, msg);
2797 return (-1);
2801 * Detach the specified device.
2804 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2806 zfs_cmd_t zc = {"\0"};
2807 char msg[1024];
2808 nvlist_t *tgt;
2809 boolean_t avail_spare, l2cache;
2810 libzfs_handle_t *hdl = zhp->zpool_hdl;
2812 (void) snprintf(msg, sizeof (msg),
2813 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2815 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2816 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2817 NULL)) == NULL)
2818 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2820 if (avail_spare)
2821 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2823 if (l2cache)
2824 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2826 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2828 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2829 return (0);
2831 switch (errno) {
2833 case ENOTSUP:
2835 * Can't detach from this type of vdev.
2837 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2838 "applicable to mirror and replacing vdevs"));
2839 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2840 break;
2842 case EBUSY:
2844 * There are no other replicas of this device.
2846 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2847 break;
2849 default:
2850 (void) zpool_standard_error(hdl, errno, msg);
2853 return (-1);
2857 * Find a mirror vdev in the source nvlist.
2859 * The mchild array contains a list of disks in one of the top-level mirrors
2860 * of the source pool. The schild array contains a list of disks that the
2861 * user specified on the command line. We loop over the mchild array to
2862 * see if any entry in the schild array matches.
2864 * If a disk in the mchild array is found in the schild array, we return
2865 * the index of that entry. Otherwise we return -1.
2867 static int
2868 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2869 nvlist_t **schild, uint_t schildren)
2871 uint_t mc;
2873 for (mc = 0; mc < mchildren; mc++) {
2874 uint_t sc;
2875 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2876 mchild[mc], 0);
2878 for (sc = 0; sc < schildren; sc++) {
2879 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2880 schild[sc], 0);
2881 boolean_t result = (strcmp(mpath, spath) == 0);
2883 free(spath);
2884 if (result) {
2885 free(mpath);
2886 return (mc);
2890 free(mpath);
2893 return (-1);
2897 * Split a mirror pool. If newroot points to NULL, then a new nvlist
2898 * is generated and it is the responsibility of the caller to free it.
2901 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2902 nvlist_t *props, splitflags_t flags)
2904 zfs_cmd_t zc = {"\0"};
2905 char msg[1024];
2906 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2907 nvlist_t **varray = NULL, *zc_props = NULL;
2908 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2909 libzfs_handle_t *hdl = zhp->zpool_hdl;
2910 uint64_t vers;
2911 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2912 int retval = 0;
2914 (void) snprintf(msg, sizeof (msg),
2915 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2917 if (!zpool_name_valid(hdl, B_FALSE, newname))
2918 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2920 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2921 (void) fprintf(stderr, gettext("Internal error: unable to "
2922 "retrieve pool configuration\n"));
2923 return (-1);
2926 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2927 == 0);
2928 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2930 if (props) {
2931 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2932 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2933 props, vers, flags, msg)) == NULL)
2934 return (-1);
2937 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2938 &children) != 0) {
2939 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2940 "Source pool is missing vdev tree"));
2941 nvlist_free(zc_props);
2942 return (-1);
2945 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2946 vcount = 0;
2948 if (*newroot == NULL ||
2949 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2950 &newchild, &newchildren) != 0)
2951 newchildren = 0;
2953 for (c = 0; c < children; c++) {
2954 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2955 char *type;
2956 nvlist_t **mchild, *vdev;
2957 uint_t mchildren;
2958 int entry;
2961 * Unlike cache & spares, slogs are stored in the
2962 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2964 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2965 &is_log);
2966 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2967 &is_hole);
2968 if (is_log || is_hole) {
2970 * Create a hole vdev and put it in the config.
2972 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2973 goto out;
2974 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2975 VDEV_TYPE_HOLE) != 0)
2976 goto out;
2977 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2978 1) != 0)
2979 goto out;
2980 if (lastlog == 0)
2981 lastlog = vcount;
2982 varray[vcount++] = vdev;
2983 continue;
2985 lastlog = 0;
2986 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2987 == 0);
2988 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2989 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2990 "Source pool must be composed only of mirrors\n"));
2991 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2992 goto out;
2995 verify(nvlist_lookup_nvlist_array(child[c],
2996 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2998 /* find or add an entry for this top-level vdev */
2999 if (newchildren > 0 &&
3000 (entry = find_vdev_entry(zhp, mchild, mchildren,
3001 newchild, newchildren)) >= 0) {
3002 /* We found a disk that the user specified. */
3003 vdev = mchild[entry];
3004 ++found;
3005 } else {
3006 /* User didn't specify a disk for this vdev. */
3007 vdev = mchild[mchildren - 1];
3010 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3011 goto out;
3014 /* did we find every disk the user specified? */
3015 if (found != newchildren) {
3016 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3017 "include at most one disk from each mirror"));
3018 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3019 goto out;
3022 /* Prepare the nvlist for populating. */
3023 if (*newroot == NULL) {
3024 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3025 goto out;
3026 freelist = B_TRUE;
3027 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3028 VDEV_TYPE_ROOT) != 0)
3029 goto out;
3030 } else {
3031 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3034 /* Add all the children we found */
3035 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3036 lastlog == 0 ? vcount : lastlog) != 0)
3037 goto out;
3040 * If we're just doing a dry run, exit now with success.
3042 if (flags.dryrun) {
3043 memory_err = B_FALSE;
3044 freelist = B_FALSE;
3045 goto out;
3048 /* now build up the config list & call the ioctl */
3049 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3050 goto out;
3052 if (nvlist_add_nvlist(newconfig,
3053 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3054 nvlist_add_string(newconfig,
3055 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3056 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3057 goto out;
3060 * The new pool is automatically part of the namespace unless we
3061 * explicitly export it.
3063 if (!flags.import)
3064 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3065 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3066 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3067 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3068 goto out;
3069 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3070 goto out;
3072 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3073 retval = zpool_standard_error(hdl, errno, msg);
3074 goto out;
3077 freelist = B_FALSE;
3078 memory_err = B_FALSE;
3080 out:
3081 if (varray != NULL) {
3082 int v;
3084 for (v = 0; v < vcount; v++)
3085 nvlist_free(varray[v]);
3086 free(varray);
3088 zcmd_free_nvlists(&zc);
3089 nvlist_free(zc_props);
3090 nvlist_free(newconfig);
3091 if (freelist) {
3092 nvlist_free(*newroot);
3093 *newroot = NULL;
3096 if (retval != 0)
3097 return (retval);
3099 if (memory_err)
3100 return (no_memory(hdl));
3102 return (0);
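/*
 * Caller sketch (illustrative): dry-run a split into a new pool named
 * "tankclone", letting the library pick one disk from each mirror.
 * On success '*newroot' comes back populated and must be freed by
 * the caller.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "tankclone", &newroot, NULL,
 *	    flags) == 0)
 *		nvlist_free(newroot);
 */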
3106 * Remove the given device. Currently, this is supported only for hot spares,
3107 * cache, and log devices.
3110 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3112 zfs_cmd_t zc = {"\0"};
3113 char msg[1024];
3114 nvlist_t *tgt;
3115 boolean_t avail_spare, l2cache, islog;
3116 libzfs_handle_t *hdl = zhp->zpool_hdl;
3117 uint64_t version;
3119 (void) snprintf(msg, sizeof (msg),
3120 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3122 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3123 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3124 &islog)) == NULL)
3125 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3127 * XXX - this should just go away.
3129 if (!avail_spare && !l2cache && !islog) {
3130 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3131 "only inactive hot spares, cache, "
3132 "or log devices can be removed"));
3133 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3136 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3137 if (islog && version < SPA_VERSION_HOLES) {
3138 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3139 "pool must be upgrade to support log removal"));
3140 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3143 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3145 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3146 return (0);
3148 return (zpool_standard_error(hdl, errno, msg));
3152 * Clear the errors for the pool, or the particular device if specified.
3155 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3157 zfs_cmd_t zc = {"\0"};
3158 char msg[1024];
3159 nvlist_t *tgt;
3160 zpool_rewind_policy_t policy;
3161 boolean_t avail_spare, l2cache;
3162 libzfs_handle_t *hdl = zhp->zpool_hdl;
3163 nvlist_t *nvi = NULL;
3164 int error;
3166 if (path)
3167 (void) snprintf(msg, sizeof (msg),
3168 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3169 path);
3170 else
3171 (void) snprintf(msg, sizeof (msg),
3172 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3173 zhp->zpool_name);
3175 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3176 if (path) {
3177 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3178 &l2cache, NULL)) == NULL)
3179 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3182 * Don't allow error clearing for hot spares. Do allow
3183 * error clearing for l2cache devices.
3185 if (avail_spare)
3186 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3188 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3189 &zc.zc_guid) == 0);
3192 zpool_get_rewind_policy(rewindnvl, &policy);
3193 zc.zc_cookie = policy.zrp_request;
3195 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3196 return (-1);
3198 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3199 return (-1);
3201 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3202 errno == ENOMEM) {
3203 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3204 zcmd_free_nvlists(&zc);
3205 return (-1);
3209 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3210 errno != EPERM && errno != EACCES)) {
3211 if (policy.zrp_request &
3212 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3213 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3214 zpool_rewind_exclaim(hdl, zc.zc_name,
3215 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3216 nvi);
3217 nvlist_free(nvi);
3219 zcmd_free_nvlists(&zc);
3220 return (0);
3223 zcmd_free_nvlists(&zc);
3224 return (zpool_standard_error(hdl, errno, msg));
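/*
 * Caller sketch (illustrative): clear error counts for the whole pool
 * without requesting any rewind, using a policy nvlist along the
 * lines of what the zpool(8) command builds for 'zpool clear'.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *
 *	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	fnvlist_free(policy);
 */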
3228 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3231 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3233 zfs_cmd_t zc = {"\0"};
3234 char msg[1024];
3235 libzfs_handle_t *hdl = zhp->zpool_hdl;
3237 (void) snprintf(msg, sizeof (msg),
3238 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3239 (u_longlong_t)guid);
3241 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3242 zc.zc_guid = guid;
3243 zc.zc_cookie = ZPOOL_NO_REWIND;
3245 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3246 return (0);
3248 return (zpool_standard_error(hdl, errno, msg));
3252 * Change the GUID for a pool.
3255 zpool_reguid(zpool_handle_t *zhp)
3257 char msg[1024];
3258 libzfs_handle_t *hdl = zhp->zpool_hdl;
3259 zfs_cmd_t zc = {"\0"};
3261 (void) snprintf(msg, sizeof (msg),
3262 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3264 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3265 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3266 return (0);
3268 return (zpool_standard_error(hdl, errno, msg));
3272 * Reopen the pool.
3275 zpool_reopen(zpool_handle_t *zhp)
3277 zfs_cmd_t zc = {"\0"};
3278 char msg[1024];
3279 libzfs_handle_t *hdl = zhp->zpool_hdl;
3281 (void) snprintf(msg, sizeof (msg),
3282 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3283 zhp->zpool_name);
3285 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3286 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3287 return (0);
3288 return (zpool_standard_error(hdl, errno, msg));
3291 #if defined(__sun__) || defined(__sun)
3293 * Convert from a devid string to a path.
3295 static char *
3296 devid_to_path(char *devid_str)
3298 ddi_devid_t devid;
3299 char *minor;
3300 char *path;
3301 devid_nmlist_t *list = NULL;
3302 int ret;
3304 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3305 return (NULL);
3307 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3309 devid_str_free(minor);
3310 devid_free(devid);
3312 if (ret != 0)
3313 return (NULL);
3316 * In case the strdup() fails, we will just return NULL below.
3318 path = strdup(list[0].devname);
3320 devid_free_nmlist(list);
3322 return (path);
3326 * Convert from a path to a devid string.
3328 static char *
3329 path_to_devid(const char *path)
3331 int fd;
3332 ddi_devid_t devid;
3333 char *minor, *ret;
3335 if ((fd = open(path, O_RDONLY)) < 0)
3336 return (NULL);
3338 minor = NULL;
3339 ret = NULL;
3340 if (devid_get(fd, &devid) == 0) {
3341 if (devid_get_minor_name(fd, &minor) == 0)
3342 ret = devid_str_encode(devid, minor);
3343 if (minor != NULL)
3344 devid_str_free(minor);
3345 devid_free(devid);
3347 (void) close(fd);
3349 return (ret);
3353 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3354 * ignore any failure here, since a common case is for an unprivileged user to
3355 * type 'zpool status', and we'll display the correct information anyway.
3357 static void
3358 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3360 zfs_cmd_t zc = {"\0"};
3362 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3363 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3364 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3365 &zc.zc_guid) == 0);
3367 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3369 #endif /* sun */
3372 * Remove partition suffix from a vdev path. Partition suffixes may take three
3373 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3374 case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3375 * third case only occurs when preceded by a string matching the regular
3376 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
3378 * The caller must free the returned string.
3380 char *
3381 zfs_strip_partition(char *path)
3383 char *tmp = strdup(path);
3384 char *part = NULL, *d = NULL;
3385 if (!tmp)
3386 return (NULL);
3388 if ((part = strstr(tmp, "-part")) && part != tmp) {
3389 d = part + 5;
3390 } else if ((part = strrchr(tmp, 'p')) &&
3391 part > tmp + 1 && isdigit(*(part-1))) {
3392 d = part + 1;
3393 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3394 tmp[1] == 'd') {
3395 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
3396 } else if (strncmp("xvd", tmp, 3) == 0) {
3397 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
3399 if (part && d && *d != '\0') {
3400 for (; isdigit(*d); d++) { }
3401 if (*d == '\0')
3402 *part = '\0';
3405 return (tmp);
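/*
 * Examples of the stripping rules above (illustrative):
 *
 *	zfs_strip_partition("sda1")	 returns "sda"
 *	zfs_strip_partition("md0p1")	 returns "md0"
 *	zfs_strip_partition("A0-part2")	 returns "A0"
 *	zfs_strip_partition("xvdq3")	 returns "xvdq"
 *
 * Each result is freshly allocated and must be freed by the caller.
 */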
3409 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
3411 * path: /dev/sda1
3412 * returns: /dev/sda
3414 * Returned string must be freed.
3416 char *
3417 zfs_strip_partition_path(char *path)
3419 char *newpath = strdup(path);
3420 char *sd_offset;
3421 char *new_sd;
3423 if (!newpath)
3424 return (NULL);
3426 /* Point to "sda1" part of "/dev/sda1" */
3427 sd_offset = strrchr(newpath, '/') + 1;
3429 /* Get our new name "sda" */
3430 new_sd = zfs_strip_partition(sd_offset);
3431 if (!new_sd) {
3432 free(newpath);
3433 return (NULL);
3436 /* Paste the "sda" where "sda1" was */
3437 strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
3439 /* Free temporary "sda" */
3440 free(new_sd);
3442 return (newpath);
3445 #define PATH_BUF_LEN 64
3448 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3449 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3450 * We also check if this is a whole disk, in which case we strip off the
3451 * trailing 's0' slice name.
3453 * This routine is also responsible for identifying when disks have been
3454 * reconfigured in a new location. The kernel will have opened the device by
3455 * devid, but the path will still refer to the old location. To catch this, we
3456 * first do a path -> devid translation (which is fast for the common case). If
3457 * the devid matches, we're done. If not, we do a reverse devid -> path
3458 * translation and issue the appropriate ioctl() to update the path of the vdev.
3459 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3460 * of these checks.
3462 char *
3463 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3464 int name_flags)
3466 char *path, *type, *env;
3467 uint64_t value;
3468 char buf[PATH_BUF_LEN];
3469 char tmpbuf[PATH_BUF_LEN];
3471 env = getenv("ZPOOL_VDEV_NAME_PATH");
3472 if (env && (strtoul(env, NULL, 0) > 0 ||
3473 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3474 name_flags |= VDEV_NAME_PATH;
3476 env = getenv("ZPOOL_VDEV_NAME_GUID");
3477 if (env && (strtoul(env, NULL, 0) > 0 ||
3478 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3479 name_flags |= VDEV_NAME_GUID;
3481 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3482 if (env && (strtoul(env, NULL, 0) > 0 ||
3483 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3484 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3486 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3487 name_flags & VDEV_NAME_GUID) {
3488 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3489 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3490 path = buf;
3491 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3492 #if defined(__sun__) || defined(__sun)
3494 * Live VDEV path updates to a kernel VDEV during a
3495 * zpool_vdev_name lookup are not supported on Linux.
3497 char *devid;
3498 vdev_stat_t *vs;
3499 uint_t vsc;
3502 * If the device is dead (faulted, offline, etc) then don't
3503 * bother opening it. Otherwise we may be forcing the user to
3504 * open a misbehaving device, which can have undesirable
3505 * effects.
3507 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3508 (uint64_t **)&vs, &vsc) != 0 ||
3509 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3510 zhp != NULL &&
3511 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3513 * Determine if the current path is correct.
3515 char *newdevid = path_to_devid(path);
3517 if (newdevid == NULL ||
3518 strcmp(devid, newdevid) != 0) {
3519 char *newpath;
3521 if ((newpath = devid_to_path(devid)) != NULL) {
3523 * Update the path appropriately.
3525 set_path(zhp, nv, newpath);
3526 if (nvlist_add_string(nv,
3527 ZPOOL_CONFIG_PATH, newpath) == 0)
3528 verify(nvlist_lookup_string(nv,
3529 ZPOOL_CONFIG_PATH,
3530 &path) == 0);
3531 free(newpath);
3535 if (newdevid)
3536 devid_str_free(newdevid);
3538 #endif /* sun */
3540 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3541 char *rp = realpath(path, NULL);
3542 if (rp) {
3543 strlcpy(buf, rp, sizeof (buf));
3544 path = buf;
3545 free(rp);
3550 * For a block device only use the name.
3552 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3553 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3554 !(name_flags & VDEV_NAME_PATH)) {
3555 path = strrchr(path, '/');
3556 path++;
3560 * Remove the partition from the path if this is a whole disk.
3562 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3563 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3564 return (zfs_strip_partition(path));
3566 } else {
3567 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3570 * If it's a raidz device, we need to stick in the parity level.
3572 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3573 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3574 &value) == 0);
3575 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3576 (u_longlong_t)value);
3577 path = buf;
3581 * We identify each top-level vdev by using a <type-id>
3582 * naming convention.
3584 if (name_flags & VDEV_NAME_TYPE_ID) {
3585 uint64_t id;
3586 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3587 &id) == 0);
3588 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3589 path, (u_longlong_t)id);
3590 path = tmpbuf;
3594 return (zfs_strdup(hdl, path));
3597 static int
3598 zbookmark_mem_compare(const void *a, const void *b)
3600 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3604 * Retrieve the persistent error log, uniquify the members, and return to the
3605 * caller.
3608 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3610 zfs_cmd_t zc = {"\0"};
3611 libzfs_handle_t *hdl = zhp->zpool_hdl;
3612 uint64_t count;
3613 zbookmark_phys_t *zb = NULL;
3614 int i;
3617 * Retrieve the raw error list from the kernel. If the number of errors
3618 * has increased, allocate more space and continue until we get the
3619 * entire list.
3621 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3622 &count) == 0);
3623 if (count == 0)
3624 return (0);
3625 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3626 count * sizeof (zbookmark_phys_t));
3627 zc.zc_nvlist_dst_size = count;
3628 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3629 for (;;) {
3630 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3631 &zc) != 0) {
3632 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3633 if (errno == ENOMEM) {
3634 void *dst;
3636 count = zc.zc_nvlist_dst_size;
3637 dst = zfs_alloc(zhp->zpool_hdl, count *
3638 sizeof (zbookmark_phys_t));
3639 zc.zc_nvlist_dst = (uintptr_t)dst;
3640 } else {
3641 return (zpool_standard_error_fmt(hdl, errno,
3642 dgettext(TEXT_DOMAIN, "errors: List of "
3643 "errors unavailable")));
3645 } else {
3646 break;
3651 * Sort the resulting bookmarks. This is a little confusing due to the
3652 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3653 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3654 * _not_ copied as part of the process. So we point the start of our
3655 * array appropriately and decrement the total number of elements.
3657 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3658 zc.zc_nvlist_dst_size;
3659 count -= zc.zc_nvlist_dst_size;
3661 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3663 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3666 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3668 for (i = 0; i < count; i++) {
3669 nvlist_t *nv;
3671 /* ignoring zb_blkid and zb_level for now */
3672 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3673 zb[i-1].zb_object == zb[i].zb_object)
3674 continue;
3676 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3677 goto nomem;
3678 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3679 zb[i].zb_objset) != 0) {
3680 nvlist_free(nv);
3681 goto nomem;
3683 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3684 zb[i].zb_object) != 0) {
3685 nvlist_free(nv);
3686 goto nomem;
3688 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3689 nvlist_free(nv);
3690 goto nomem;
3692 nvlist_free(nv);
3695 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3696 return (0);
3698 nomem:
3699 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3700 return (no_memory(zhp->zpool_hdl));
3704 * Upgrade a ZFS pool to the latest on-disk version.
3707 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3709 zfs_cmd_t zc = {"\0"};
3710 libzfs_handle_t *hdl = zhp->zpool_hdl;
3712 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3713 zc.zc_cookie = new_version;
3715 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3716 return (zpool_standard_error_fmt(hdl, errno,
3717 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3718 zhp->zpool_name));
3719 return (0);
3722 void
3723 zfs_save_arguments(int argc, char **argv, char *string, int len)
3725 int i;
3727 (void) strlcpy(string, basename(argv[0]), len);
3728 for (i = 1; i < argc; i++) {
3729 (void) strlcat(string, " ", len);
3730 (void) strlcat(string, argv[i], len);
3735 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3737 zfs_cmd_t zc = {"\0"};
3738 nvlist_t *args;
3739 int err;
3741 args = fnvlist_alloc();
3742 fnvlist_add_string(args, "message", message);
3743 err = zcmd_write_src_nvlist(hdl, &zc, args);
3744 if (err == 0)
3745 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3746 nvlist_free(args);
3747 zcmd_free_nvlists(&zc);
3748 return (err);
3752 * Perform ioctl to get some command history of a pool.
3754 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3755 * logical offset of the history buffer to start reading from.
3757 * Upon return, 'off' is the next logical offset to read from and
3758 * 'len' is the actual amount of bytes read into 'buf'.
3760 static int
3761 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3763 zfs_cmd_t zc = {"\0"};
3764 libzfs_handle_t *hdl = zhp->zpool_hdl;
3766 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3768 zc.zc_history = (uint64_t)(uintptr_t)buf;
3769 zc.zc_history_len = *len;
3770 zc.zc_history_offset = *off;
3772 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3773 switch (errno) {
3774 case EPERM:
3775 return (zfs_error_fmt(hdl, EZFS_PERM,
3776 dgettext(TEXT_DOMAIN,
3777 "cannot show history for pool '%s'"),
3778 zhp->zpool_name));
3779 case ENOENT:
3780 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3781 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3782 "'%s'"), zhp->zpool_name));
3783 case ENOTSUP:
3784 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3785 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3786 "'%s', pool must be upgraded"), zhp->zpool_name));
3787 default:
3788 return (zpool_standard_error_fmt(hdl, errno,
3789 dgettext(TEXT_DOMAIN,
3790 "cannot get history for '%s'"), zhp->zpool_name));
3794 *len = zc.zc_history_len;
3795 *off = zc.zc_history_offset;
3797 return (0);
3801 * Process the buffer of nvlists, unpacking and storing each nvlist record
3802 * into 'records'. 'leftover' is set to the number of bytes that weren't
3803 * processed as there wasn't a complete record.
3806 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3807 nvlist_t ***records, uint_t *numrecords)
3809 uint64_t reclen;
3810 nvlist_t *nv;
3811 int i;
3812 void *tmp;
3814 while (bytes_read > sizeof (reclen)) {
3816 /* get length of packed record (stored as little endian) */
3817 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3818 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3820 if (bytes_read < sizeof (reclen) + reclen)
3821 break;
3823 /* unpack record */
3824 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3825 return (ENOMEM);
3826 bytes_read -= sizeof (reclen) + reclen;
3827 buf += sizeof (reclen) + reclen;
3829 /* add record to nvlist array */
3830 (*numrecords)++;
3831 if (ISP2(*numrecords + 1)) {
3832 tmp = realloc(*records,
3833 *numrecords * 2 * sizeof (nvlist_t *));
3834 if (tmp == NULL) {
3835 nvlist_free(nv);
3836 (*numrecords)--;
3837 return (ENOMEM);
3839 *records = tmp;
3841 (*records)[*numrecords - 1] = nv;
3844 *leftover = bytes_read;
3845 return (0);
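/*
 * The length prefix on each packed record is stored little-endian on
 * disk, which is why the loop above assembles it byte by byte instead
 * of casting; the decode works on both little- and big-endian hosts.
 * As a worked example, prefix bytes 0x2a 0x01 0x00 ... 0x00 decode to
 * reclen == 0x012a (298 bytes).
 */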
3849 * Retrieve the command history of a pool.
3852 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3854 char *buf;
3855 int buflen = 128 * 1024;
3856 uint64_t off = 0;
3857 nvlist_t **records = NULL;
3858 uint_t numrecords = 0;
3859 int err, i;
3861 buf = malloc(buflen);
3862 if (buf == NULL)
3863 return (ENOMEM);
3864 do {
3865 uint64_t bytes_read = buflen;
3866 uint64_t leftover;
3868 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3869 break;
3871 /* if nothing else was read in, we're at EOF, just return */
3872 if (!bytes_read)
3873 break;
3875 if ((err = zpool_history_unpack(buf, bytes_read,
3876 &leftover, &records, &numrecords)) != 0)
3877 break;
3878 off -= leftover;
3879 if (leftover == bytes_read) {
3881 * no progress made, because buffer is not big enough
3882 * to hold this record; resize and retry.
3884 buflen *= 2;
3885 free(buf);
3886 buf = malloc(buflen);
3887 if (buf == NULL)
3888 return (ENOMEM);
3891 /* CONSTCOND */
3892 } while (1);
3894 free(buf);
3896 if (!err) {
3897 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3898 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3899 records, numrecords) == 0);
3901 for (i = 0; i < numrecords; i++)
3902 nvlist_free(records[i]);
3903 free(records);
3905 return (err);
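/*
 * Caller sketch (illustrative): print the commands recorded in a
 * pool's history. ZPOOL_HIST_CMD is only present on records written
 * for command-line operations, so the lookup may fail harmlessly.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrec, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis,
 *		    ZPOOL_HIST_RECORD, &records, &nrec) == 0);
 *		for (i = 0; i < nrec; i++)
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		nvlist_free(nvhis);
 *	}
 */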
3909 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
3910 * If there is a new event available 'nvp' will contain a newly allocated
3911 * nvlist and 'dropped' will be set to the number of missed events since
3912 * the last call to this function. When 'nvp' is set to NULL it indicates
3913 * no new events are available. In either case the function returns 0 and
3914 * it is up to the caller to free 'nvp'. In the case of a fatal error the
3915 * function will return a non-zero value. When the function is called in
3916 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
3917 * it will not return until a new event is available.
3920 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3921 int *dropped, unsigned flags, int zevent_fd)
3923 zfs_cmd_t zc = {"\0"};
3924 int error = 0;
3926 *nvp = NULL;
3927 *dropped = 0;
3928 zc.zc_cleanup_fd = zevent_fd;
3930 if (flags & ZEVENT_NONBLOCK)
3931 zc.zc_guid = ZEVENT_NONBLOCK;
3933 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3934 return (-1);
3936 retry:
3937 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3938 switch (errno) {
3939 case ESHUTDOWN:
3940 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3941 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3942 goto out;
3943 case ENOENT:
3944 /* Blocking error case should not occur */
3945 if (!(flags & ZEVENT_NONBLOCK))
3946 error = zpool_standard_error_fmt(hdl, errno,
3947 dgettext(TEXT_DOMAIN, "cannot get event"));
3949 goto out;
3950 case ENOMEM:
3951 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3952 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3953 dgettext(TEXT_DOMAIN, "cannot get event"));
3954 goto out;
3955 } else {
3956 goto retry;
3958 default:
3959 error = zpool_standard_error_fmt(hdl, errno,
3960 dgettext(TEXT_DOMAIN, "cannot get event"));
3961 goto out;
3965 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3966 if (error != 0)
3967 goto out;
3969 *dropped = (int)zc.zc_cookie;
3970 out:
3971 zcmd_free_nvlists(&zc);
3973 return (error);
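/*
 * Caller sketch (illustrative): drain any queued events without
 * blocking, assuming 'zevent_fd' is a descriptor opened on ZFS_DEV
 * (/dev/zfs) as the 'zpool events' consumer does.
 *
 *	nvlist_t *nvp;
 *	int dropped;
 *
 *	for (;;) {
 *		if (zpool_events_next(hdl, &nvp, &dropped,
 *		    ZEVENT_NONBLOCK, zevent_fd) != 0 || nvp == NULL)
 *			break;
 *		if (dropped > 0)
 *			(void) printf("missed %d events\n", dropped);
 *		nvlist_print(stdout, nvp);
 *		nvlist_free(nvp);
 *	}
 */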
3977 * Clear all events.
3980 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3982 zfs_cmd_t zc = {"\0"};
3983 char msg[1024];
3985 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3986 "cannot clear events"));
3988 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3989 return (zpool_standard_error_fmt(hdl, errno, msg));
3991 if (count != NULL)
3992 *count = (int)zc.zc_cookie; /* # of events cleared */
3994 return (0);
3998 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
3999 * the passed zevent_fd file handle. On success zero is returned,
4000 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4003 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4005 zfs_cmd_t zc = {"\0"};
4006 int error = 0;
4008 zc.zc_guid = eid;
4009 zc.zc_cleanup_fd = zevent_fd;
4011 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4012 switch (errno) {
4013 case ENOENT:
4014 error = zfs_error_fmt(hdl, EZFS_NOENT,
4015 dgettext(TEXT_DOMAIN, "cannot get event"));
4016 break;
4018 case ENOMEM:
4019 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4020 dgettext(TEXT_DOMAIN, "cannot get event"));
4021 break;
4023 default:
4024 error = zpool_standard_error_fmt(hdl, errno,
4025 dgettext(TEXT_DOMAIN, "cannot get event"));
4026 break;
4030 return (error);
4033 void
4034 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4035 char *pathname, size_t len)
4037 zfs_cmd_t zc = {"\0"};
4038 boolean_t mounted = B_FALSE;
4039 char *mntpnt = NULL;
4040 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4042 if (dsobj == 0) {
4043 /* special case for the MOS */
4044 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4045 (longlong_t)obj);
4046 return;
4049 /* get the dataset's name */
4050 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4051 zc.zc_obj = dsobj;
4052 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4053 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4054 /* just write out a path of two object numbers */
4055 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4056 (longlong_t)dsobj, (longlong_t)obj);
4057 return;
4059 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4061 /* find out if the dataset is mounted */
4062 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4064 /* get the corrupted object's path */
4065 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4066 zc.zc_obj = obj;
4067 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4068 &zc) == 0) {
4069 if (mounted) {
4070 (void) snprintf(pathname, len, "%s%s", mntpnt,
4071 zc.zc_value);
4072 } else {
4073 (void) snprintf(pathname, len, "%s:%s",
4074 dsname, zc.zc_value);
4076 } else {
4077 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4078 (longlong_t)obj);
4080 free(mntpnt);
4084 * Read the EFI label from the config; if a label does not exist, then
4085 * pass back the error to the caller. If the caller has passed a non-NULL
4086 * diskaddr argument then we set it to the starting address of the EFI
4087 * partition.
4089 static int
4090 read_efi_label(nvlist_t *config, diskaddr_t *sb)
4092 char *path;
4093 int fd;
4094 char diskname[MAXPATHLEN];
4095 int err = -1;
4097 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4098 return (err);
4100 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4101 strrchr(path, '/'));
4102 if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
4103 struct dk_gpt *vtoc;
4105 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4106 if (sb != NULL)
4107 *sb = vtoc->efi_parts[0].p_start;
4108 efi_free(vtoc);
4110 (void) close(fd);
4112 return (err);
4116 * Determine where a partition starts on a disk in the current
4117 * configuration.
4119 static diskaddr_t
4120 find_start_block(nvlist_t *config)
4122 nvlist_t **child;
4123 uint_t c, children;
4124 diskaddr_t sb = MAXOFFSET_T;
4125 uint64_t wholedisk;
4127 if (nvlist_lookup_nvlist_array(config,
4128 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4129 if (nvlist_lookup_uint64(config,
4130 ZPOOL_CONFIG_WHOLE_DISK,
4131 &wholedisk) != 0 || !wholedisk) {
4132 return (MAXOFFSET_T);
4134 if (read_efi_label(config, &sb) < 0)
4135 sb = MAXOFFSET_T;
4136 return (sb);
4139 for (c = 0; c < children; c++) {
4140 sb = find_start_block(child[c]);
4141 if (sb != MAXOFFSET_T) {
4142 return (sb);
4145 return (MAXOFFSET_T);
4148 static int
4149 zpool_label_disk_check(char *path)
4151 struct dk_gpt *vtoc;
4152 int fd, err;
4154 if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
4155 return (errno);
4157 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4158 (void) close(fd);
4159 return (err);
4162 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4163 efi_free(vtoc);
4164 (void) close(fd);
4165 return (EIDRM);
4168 efi_free(vtoc);
4169 (void) close(fd);
4170 return (0);
4174 * Generate a unique partition name for the ZFS member. Partitions must
4175 * have unique names to ensure udev will be able to create symlinks under
4176 * /dev/disk/by-partlabel/ for all pool members. The partition names are
4177 * of the form <pool>-<unique-id>.
4179 static void
4180 zpool_label_name(char *label_name, int label_size)
4182 uint64_t id = 0;
4183 int fd;
4185 fd = open("/dev/urandom", O_RDONLY);
4186 if (fd >= 0) {
4187 if (read(fd, &id, sizeof (id)) != sizeof (id))
4188 id = 0;
4190 close(fd);
4193 if (id == 0)
4194 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4196 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
4200 * Label an individual disk. The name provided is the short name,
4201 * stripped of any leading /dev path.
4204 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4206 char path[MAXPATHLEN];
4207 struct dk_gpt *vtoc;
4208 int rval, fd;
4209 size_t resv = EFI_MIN_RESV_SIZE;
4210 uint64_t slice_size;
4211 diskaddr_t start_block;
4212 char errbuf[1024];
4214 /* prepare an error message just in case */
4215 (void) snprintf(errbuf, sizeof (errbuf),
4216 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4218 if (zhp) {
4219 nvlist_t *nvroot;
4221 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4222 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4224 if (zhp->zpool_start_block == 0)
4225 start_block = find_start_block(nvroot);
4226 else
4227 start_block = zhp->zpool_start_block;
4228 zhp->zpool_start_block = start_block;
4229 } else {
4230 /* new pool */
4231 start_block = NEW_START_BLOCK;
4234 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4236 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
4238 * This shouldn't happen. We've long since verified that this
4239 * is a valid device.
4241 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4242 "label '%s': unable to open device: %d"), path, errno);
4243 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4246 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4248 * The only way this can fail is if we run out of memory, or we
4249 * were unable to read the disk's capacity.
4251 if (errno == ENOMEM)
4252 (void) no_memory(hdl);
4254 (void) close(fd);
4255 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4256 "label '%s': unable to read disk capacity"), path);
4258 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4261 slice_size = vtoc->efi_last_u_lba + 1;
4262 slice_size -= EFI_MIN_RESV_SIZE;
4263 if (start_block == MAXOFFSET_T)
4264 start_block = NEW_START_BLOCK;
4265 slice_size -= start_block;
4266 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4268 vtoc->efi_parts[0].p_start = start_block;
4269 vtoc->efi_parts[0].p_size = slice_size;
4272 * Why we use V_USR: V_BACKUP confuses users, and is considered
4273 * disposable by some EFI utilities (since EFI doesn't have a backup
4274 * slice). V_UNASSIGNED is supposed to be used only for zero size
4275 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4276 * etc. were all pretty specific. V_USR is as close to reality as we
4277 * can get, in the absence of V_OTHER.
4279 vtoc->efi_parts[0].p_tag = V_USR;
4280 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
4282 vtoc->efi_parts[8].p_start = slice_size + start_block;
4283 vtoc->efi_parts[8].p_size = resv;
4284 vtoc->efi_parts[8].p_tag = V_RESERVED;
4286 rval = efi_write(fd, vtoc);
4288 /* Flush the buffers to disk and invalidate the page cache. */
4289 (void) fsync(fd);
4290 (void) ioctl(fd, BLKFLSBUF);
4292 if (rval == 0)
4293 rval = efi_rescan(fd);
4296 * Some block drivers (like pcata) may not support EFI GPT labels.
4297 * Print out a helpful error message directing the user to manually
4298 * label the disk and give a specific slice.
4300 if (rval != 0) {
4301 (void) close(fd);
4302 efi_free(vtoc);
4304 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4305 "parted(8) and then provide a specific slice: %d"), rval);
4306 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4309 (void) close(fd);
4310 efi_free(vtoc);
4312 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4313 (void) zfs_append_partition(path, MAXPATHLEN);
4315 /* Wait for udev to signal that the device has settled. */
4316 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
4317 if (rval) {
4318 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4319 "detect device partitions on '%s': %d"), path, rval);
4320 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4323 /* We can't be too paranoid. Read the label back and verify it. */
4324 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4325 rval = zpool_label_disk_check(path);
4326 if (rval) {
4327 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4328 "EFI label on '%s' is damaged. Ensure\nthis device "
4329 "is not in in use, and is functioning properly: %d"),
4330 path, rval);
4331 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4334 return (0);
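/*
 * Illustrative sketch (not part of libzfs; kept out of the build with
 * #if 0): a worked example of the slice-size arithmetic performed by
 * zpool_label_disk() above.  Every concrete value below is an
 * assumption chosen for the example; only the formula mirrors the
 * real code.
 */
#if 0
static void
example_efi_layout(void)	/* hypothetical helper, illustration only */
{
	uint64_t last_usable_lba = 20971486;	/* assumed efi_last_u_lba */
	uint64_t resv = 16384;		/* assumed EFI_MIN_RESV_SIZE */
	uint64_t start_block = 2048;	/* assumed NEW_START_BLOCK */
	uint64_t align = 2048;		/* assumed PARTITION_END_ALIGNMENT */
	uint64_t slice_size;

	slice_size = last_usable_lba + 1;	/* total usable sectors */
	slice_size -= resv;		/* hold back the reserved slice */
	slice_size -= start_block;	/* skip the leading gap */
	slice_size &= -align;		/* P2ALIGN: round down to alignment */

	/* Partition 0 (data) and partition 8 (reserved), as set up above */
	(void) printf("part 0: start %llu, %llu sectors\n",
	    (u_longlong_t)start_block, (u_longlong_t)slice_size);
	(void) printf("part 8: start %llu, %llu sectors\n",
	    (u_longlong_t)(start_block + slice_size), (u_longlong_t)resv);
}
#endif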
/*
 * Allocate and return the underlying device name for a device mapper device.
 * If a device mapper device maps to multiple devices, return the first device.
 *
 * For example, dm_name = "/dev/dm-0" could return "/dev/sda".  Symlinks to a
 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
 *
 * Returns device name, or NULL on error or no match.  If dm_name is not a DM
 * device then return NULL.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
dm_get_underlying_path(char *dm_name)
{
	DIR *dp = NULL;
	struct dirent *ep;
	char *realp;
	char *tmp = NULL;
	char *path = NULL;
	char *dev_str;
	int size;

	if (dm_name == NULL)
		return (NULL);

	/* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
	realp = realpath(dm_name, NULL);
	if (realp == NULL)
		return (NULL);

	/*
	 * If they preface 'dev' with a path (like "/dev") then strip it off.
	 * We just want the 'dm-N' part.
	 */
	tmp = strrchr(realp, '/');
	if (tmp != NULL)
		dev_str = tmp + 1;	/* +1 since we want the chr after '/' */
	else
		dev_str = realp;	/* no '/' in the name; use it whole */
	size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
	if (size == -1 || !tmp)
		goto end;

	dp = opendir(tmp);
	if (dp == NULL)
		goto end;

	/* Return first sd* entry in /sys/block/dm-N/slaves/ */
	while ((ep = readdir(dp))) {
		if (ep->d_type != DT_DIR) {	/* skip "." and ".." dirs */
			size = asprintf(&path, "/dev/%s", ep->d_name);
			break;
		}
	}

end:
	if (dp != NULL)
		closedir(dp);
	free(tmp);
	free(realp);
	return (path);
}
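/*
 * Illustrative usage sketch (not part of libzfs; kept out of the
 * build with #if 0).  "/dev/mapper/mpatha" is an assumed example
 * device name.
 */
#if 0
static void
example_dm_underlying(void)	/* hypothetical helper, illustration only */
{
	char dm[] = "/dev/mapper/mpatha";
	char *disk = dm_get_underlying_path(dm);

	if (disk != NULL) {
		/* Prints e.g. "/dev/sda" for a two-path mpath device */
		(void) printf("first backing device: %s\n", disk);
		free(disk);	/* returned string is malloc'd */
	}
}
#endif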
/*
 * Return 1 if device is a device mapper or multipath device.
 * Return 0 if not.
 */
int
zfs_dev_is_dm(char *dev_name)
{
	char *tmp;

	tmp = dm_get_underlying_path(dev_name);
	if (tmp == NULL)
		return (0);

	free(tmp);
	return (1);
}
/*
 * By "whole disk" we mean an entire physical disk (something we can
 * label, toggle the write cache on, etc.) as opposed to the full
 * capacity of a pseudo-device such as lofi or did.  We act as if we
 * are labeling the disk, which should be a pretty good test of whether
 * it's a viable device or not.  Returns 1 if it is and 0 if it isn't.
 */
int
zfs_dev_is_whole_disk(char *dev_name)
{
	struct dk_gpt *label;
	int fd;

	if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
		return (0);

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
		(void) close(fd);
		return (0);
	}

	efi_free(label);
	(void) close(fd);

	return (1);
}
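/*
 * Illustrative sketch (not part of libzfs; kept out of the build with
 * #if 0): classifying a device with the two predicates above.
 * "/dev/sda" is an assumed example path.
 */
#if 0
static void
example_classify_dev(void)	/* hypothetical helper, illustration only */
{
	char dev[] = "/dev/sda";

	if (zfs_dev_is_dm(dev))
		(void) printf("%s: device-mapper/multipath device\n", dev);
	else if (zfs_dev_is_whole_disk(dev))
		(void) printf("%s: whole disk we could label\n", dev);
	else
		(void) printf("%s: partition or other device\n", dev);
}
#endif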
/*
 * Lookup the underlying device for a device name.
 *
 * Often you'll have a symlink to a device, a partition device,
 * or a multipath device, and want to look up the underlying device.
 * This function returns the underlying device name.  If the device
 * name is already the underlying device, then just return the same
 * name.  If the device is a DM device with multiple underlying devices
 * then return the first one.
 *
 * For example:
 *
 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
 * dev_name:	/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
 * returns:	/dev/sda
 *
 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
 * dev_name:	/dev/mapper/mpatha
 * returns:	/dev/sda (first device)
 *
 * 3. /dev/sda (already the underlying device)
 * dev_name:	/dev/sda
 * returns:	/dev/sda
 *
 * 4. /dev/dm-3 (mapped to /dev/sda)
 * dev_name:	/dev/dm-3
 * returns:	/dev/sda
 *
 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
 * dev_name:	/dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
 * returns:	/dev/sdb
 *
 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
 * dev_name:	/dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
 * returns:	/dev/sda
 *
 * Returns underlying device name, or NULL on error or no match.
 *
 * NOTE: The returned name string must be *freed*.
 */
char *
zfs_get_underlying_path(char *dev_name)
{
	char *name = NULL;
	char *tmp;

	if (dev_name == NULL)
		return (NULL);

	tmp = dm_get_underlying_path(dev_name);

	/* dev_name not a DM device, so just un-symlinkize it */
	if (tmp == NULL)
		tmp = realpath(dev_name, NULL);

	if (tmp != NULL) {
		name = zfs_strip_partition_path(tmp);
		free(tmp);
	}

	return (name);
}
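/*
 * Illustrative usage sketch (not part of libzfs; kept out of the
 * build with #if 0), matching example 5 in the comment above; the
 * by-id path is an assumed example.
 */
#if 0
static void
example_underlying_path(void)	/* hypothetical helper, illustration only */
{
	char link[] = "/dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9";
	char *dev = zfs_get_underlying_path(link);

	if (dev != NULL) {
		(void) printf("underlying device: %s\n", dev); /* /dev/sdb */
		free(dev);
	}
}
#endif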
/*
 * Given a dev name like "sda", return the full enclosure sysfs path to
 * the disk.  You can also pass in the name with "/dev" prepended
 * to it (like /dev/sda).
 *
 * For example, disk "sda" in enclosure slot 1:
 * dev_name:	"sda"
 * returns:	"/sys/class/enclosure/1:0:3:0/Slot 1"
 *
 * 'dev_name' must be a non-devicemapper device.
 *
 * Returned string must be freed.
 */
char *
zfs_get_enclosure_sysfs_path(char *dev_name)
{
	DIR *dp = NULL;
	struct dirent *ep;
	char buf[MAXPATHLEN];
	char *tmp1 = NULL;
	char *tmp2 = NULL;
	char *tmp3 = NULL;
	char *path = NULL;
	ssize_t size;	/* readlink(2) returns ssize_t */
	int tmpsize;

	if (dev_name == NULL)
		return (NULL);
	/* If they preface 'dev' with a path (like "/dev") then strip it off */
	tmp1 = strrchr(dev_name, '/');
	if (tmp1 != NULL)
		dev_name = tmp1 + 1;	/* +1 since we want the chr after '/' */

	tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
	if (tmpsize == -1 || tmp1 == NULL) {
		tmp1 = NULL;	/* on failure tmp1 is undefined; don't free */
		goto end;
	}

	dp = opendir(tmp1);
	if (dp == NULL)
		goto end;	/* tmp1 is freed at 'end' */
	/*
	 * Look through all sysfs entries in /sys/block/<dev>/device for
	 * the enclosure symlink.
	 */
	while ((ep = readdir(dp))) {
		/* Ignore everything that's not our enclosure_device link */
		if (strstr(ep->d_name, "enclosure_device") == NULL)
			continue;

		if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
		    tmp2 == NULL)
			break;

		size = readlink(tmp2, buf, sizeof (buf));

		/* Did readlink fail or crop the link name? */
		if (size == -1 || size >= sizeof (buf)) {
			free(tmp2);
			tmp2 = NULL;	/* To make free() at the end a NOP */
			break;
		}

		/*
		 * We got a valid link.  readlink() doesn't terminate strings
		 * so we have to do it.
		 */
		buf[size] = '\0';

		/*
		 * Our link will look like:
		 *
		 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
		 *
		 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part.
		 */
		tmp3 = strstr(buf, "enclosure");
		if (tmp3 == NULL)
			break;

		if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
			/* If asprintf() fails, 'path' is undefined */
			path = NULL;
			break;
		}

		/* Found it; return the first enclosure link we see. */
		break;
	}
end:
	free(tmp2);
	free(tmp1);

	if (dp != NULL)
		closedir(dp);

	return (path);
}
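/*
 * Illustrative usage sketch (not part of libzfs; kept out of the
 * build with #if 0).  "sda" is an assumed example disk name.
 */
#if 0
static void
example_enclosure_path(void)	/* hypothetical helper, illustration only */
{
	char dev[] = "sda";
	char *encl = zfs_get_enclosure_sysfs_path(dev);

	if (encl != NULL) {
		/* Prints e.g. "/sys/class/enclosure/1:0:3:0/Slot 1" */
		(void) printf("enclosure slot: %s\n", encl);
		free(encl);
	}
}
#endif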