[netbsd-mini2440.git] / external/cddl/osnet/dist/lib/libzfs/common/libzfs_pool.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <alloca.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <dirent.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>
#include <strings.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#if defined(__i386) || defined(__amd64)
#define	BOOTCMD	"installgrub(1M)"
#else
#define	BOOTCMD	"installboot(1M)"
#endif

/*
 * ====================================================================
 *	zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		if (prop == ZPOOL_PROP_NAME)
			(void) strlcpy(buf, zpool_get_name(zhp), len);
		else if (prop == ZPOOL_PROP_HEALTH)
			(void) strlcpy(buf, "FAULTED", len);
		else
			(void) strlcpy(buf, "-", len);
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
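
/*
 * Usage sketch (illustrative only, not part of the original source):
 * reading pool properties into caller-supplied buffers.  'zhp' is
 * assumed to come from zpool_open(); the buffer sizes are arbitrary.
 *
 *	char health[64], cap[32];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), NULL) == 0 &&
 *	    zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, cap,
 *	    sizeof (cap), NULL) == 0)
 *		(void) printf("%s: %s, %s full\n",
 *		    zpool_get_name(zhp), health, cap);
 */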
/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool it is set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
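
/*
 * Usage sketch (illustrative only, not part of the original source):
 * setting a single property.  The name/value strings are validated and
 * parsed by zpool_valid_proplist() above, so a caller may pass the same
 * text a user would type on the zpool(1M) command line.  'hdl' is
 * assumed to be the libzfs_handle_t the pool was opened with.
 *
 *	if (zpool_set_prop(zhp, "autoreplace", "on") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */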
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}
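
/*
 * Usage sketch (illustrative only, not part of the original source):
 * the typical handle lifecycle.  Use zpool_open_canfail() instead when
 * a FAULTED pool must still be examined; zpool_close() only frees the
 * handle and never touches the pool itself.
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
 *		return (-1);
 *	... query or modify the pool through zhp ...
 *	zpool_close(zhp);
 */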
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
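
/*
 * Usage sketch (illustrative only, not part of the original source):
 * creating a single-disk pool.  The caller must supply a valid vdev
 * tree; the minimal one below is a root nvlist with one disk child.
 * The device path is hypothetical and error handling is elided.
 *
 *	nvlist_t *disk, *root;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t0d0s0") == 0);
 *
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	if (zpool_create(hdl, "tank", root, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */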
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s]));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}
/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	if (props)
		nvlist_free(props);
	return (ret);
}
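
/*
 * Usage sketch (illustrative only, not part of the original source):
 * re-importing a pool whose 'config' nvlist was obtained from
 * zpool_find_import() (declared elsewhere in libzfs).  Passing NULL
 * keeps the original name; "/mnt" becomes the temporary altroot.
 *
 *	if (zpool_import(hdl, config, NULL, "/mnt") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */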
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}
/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
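
/*
 * Usage sketch (illustrative only, not part of the original source):
 * starting a full scrub, or stopping an in-progress one with
 * POOL_SCRUB_NONE.
 *
 *	if (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING) != 0)
 *		... pool is busy resilvering, or the ioctl failed ...
 */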
/*
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;
	uint64_t is_log;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
	    l2cache, log));
}
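
/*
 * Usage sketch (illustrative only, not part of the original source):
 * looking up a device by the same short name a user passes to
 * zpool(1M).  Short names are expanded to /dev/dsk/<name> above, and a
 * string of digits is first tried as a GUID.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "c0t0d0s0", &spare, &l2cache,
 *	    &log)) == NULL)
 *		... no such device in this pool ...
 */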
static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath)
{
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	int i;

	/*
	 * Make sure this is a root pool, as phys_path doesn't mean
	 * anything to a non-root pool.
	 */
	if (!pool_is_bootable(zhp))
		return (-1);

	verify(nvlist_lookup_nvlist(zhp->zpool_config,
	    ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);

	if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (-2);

	for (i = 0; i < count; i++) {
		nvlist_t **child2;
		uint_t count2;
		char *type;
		char *tmppath;
		int j;

		if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
		    != 0)
			return (-3);

		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
			if (!vdev_online(child[i]))
				return (-8);
			verify(nvlist_lookup_string(child[i],
			    ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
			(void) strncpy(physpath, tmppath, strlen(tmppath));
		} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
			if (nvlist_lookup_nvlist_array(child[i],
			    ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
				return (-4);

			for (j = 0; j < count2; j++) {
				if (!vdev_online(child2[j]))
					return (-8);
				if (nvlist_lookup_string(child2[j],
				    ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
					return (-5);

				if ((strlen(physpath) + strlen(tmppath)) >
				    MAXNAMELEN)
					return (-6);

				if (strlen(physpath) == 0) {
					(void) strncpy(physpath, tmppath,
					    strlen(tmppath));
				} else {
					(void) strcat(physpath, " ");
					(void) strcat(physpath, tmppath);
				}
			}
		} else {
			return (-7);
		}
	}

	return (0);
}
/*
 * Returns TRUE if the given guid corresponds to the given type.
 * This is used to check for hot spares (INUSE or not), and level 2 cache
 * devices.
 */
static boolean_t
is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
{
	uint64_t target_guid;
	nvlist_t *nvroot;
	nvlist_t **list;
	uint_t count;
	int i;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
			    &target_guid) == 0);
			if (guid == target_guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
		return (zpool_standard_error(hdl, errno, msg));

	*newstate = zc.zc_cookie;
	return (0);
}
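
/*
 * Usage sketch (illustrative only, not part of the original source):
 * bringing a device back online and checking the state it actually
 * reached, since it may return as DEGRADED rather than HEALTHY.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c0t0d0s0", 0, &newstate) == 0 &&
 *	    newstate != VDEV_STATE_HEALTHY)
 *		(void) printf("device came back as %s\n",
 *		    zpool_state_to_name(newstate, VDEV_AUX_NONE));
 */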
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *path, *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, newname, &avail_spare,
	    &l2cache, NULL) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX - This should be removed once we can
			 * automatically install the bootblocks on the
			 * newly attached disk.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
			    "be sure to invoke %s to make '%s' bootable.\n"),
			    BOOTCMD, new_disk);
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!avail_spare && !l2cache) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares or cache devices "
		    "can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
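
/*
 * Usage sketch (illustrative only, not part of the original source):
 * clearing error counts for the whole pool, or for a single device by
 * path.
 *
 *	(void) zpool_clear(zhp, NULL);
 *	(void) zpool_clear(zhp, "c0t0d0s0");
 */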
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
 * hierarchy.
 */
int
zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
    void *data)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char (*paths)[MAXPATHLEN];
	size_t size = 4;
	int curr, base, ret = 0;
#ifdef PORT_NETBSD
	int fd;
	DIR *dirp;
	struct dirent *dp;
#endif
	struct stat st;

	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
		return (errno == ENOENT ? 0 : -1);
#ifdef PORT_NETBSD
	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
		int err = errno;
		(void) close(base);
		return (err == ENOENT ? 0 : -1);
	}
#endif
	/*
	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
	 */
	if (!S_ISDIR(st.st_mode)) {
		(void) close(base);
		return (0);
	}

	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
		(void) close(base);
		return (-1);
	}

	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
	curr = 0;

#ifdef PORT_NETBSD
	while (curr >= 0) {
		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
			goto err;

		if (S_ISDIR(st.st_mode)) {
			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
				goto err;

			if ((dirp = fdopendir(fd)) == NULL) {
				(void) close(fd);
				goto err;
			}

			while ((dp = readdir(dirp)) != NULL) {
				if (dp->d_name[0] == '.')
					continue;

				if (curr + 1 == size) {
					paths = zfs_realloc(hdl, paths,
					    size * sizeof (paths[0]),
					    size * 2 * sizeof (paths[0]));
					if (paths == NULL) {
						(void) closedir(dirp);
						(void) close(fd);
						goto err;
					}

					size *= 2;
				}

				(void) strlcpy(paths[curr + 1], paths[curr],
				    sizeof (paths[curr + 1]));
				(void) strlcat(paths[curr], "/",
				    sizeof (paths[curr]));
				(void) strlcat(paths[curr], dp->d_name,
				    sizeof (paths[curr]));
				curr++;
			}

			(void) closedir(dirp);

		} else {
			if ((ret = cb(paths[curr], data)) != 0)
				break;
		}

		curr--;
	}
#endif /* PORT_NETBSD */

	free(paths);
	(void) close(base);

	return (ret);

err:
	free(paths);
	(void) close(base);
	return (-1);
}
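
/*
 * Usage sketch (illustrative only, not part of the original source): a
 * callback for zpool_iter_zvol().  The iterator passes each discovered
 * /dev/zvol/dsk path component to 'cb'; a non-zero return stops the
 * walk and is propagated back to the caller.
 *
 *	static int
 *	print_zvol(const char *dataset, void *data)
 *	{
 *		(void) printf("zvol: %s\n", dataset);
 *		return (0);
 *	}
 *
 *	(void) zpool_iter_zvol(zhp, print_zvol, NULL);
 */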
typedef struct zvol_cb {
	zpool_handle_t *zcb_pool;
	boolean_t zcb_create;
} zvol_cb_t;

/*ARGSUSED*/
static int
do_zvol_create(zfs_handle_t *zhp, void *data)
{
	int ret = 0;

	if (ZFS_IS_VOLUME(zhp)) {
		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
	}

	if (ret == 0)
		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);

	zfs_close(zhp);

	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);

	zfs_close(zfp);
	return (ret);
}

static int
do_zvol_remove(const char *dataset, void *data)
{
	zpool_handle_t *zhp = data;

	return (zvol_remove_link(zhp->zpool_hdl, dataset));
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
 * by examining the /dev links so that a corrupted pool doesn't impede this
 * operation.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
}
2257 * Convert from a devid string to a path.
2259 static char *
2260 devid_to_path(char *devid_str)
2262 ddi_devid_t devid;
2263 char *minor;
2264 char *path;
2265 devid_nmlist_t *list = NULL;
2266 int ret;
2268 if (devid_str_decode(devid_str, &devid, &minor) != 0)
2269 return (NULL);
2271 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2273 devid_str_free(minor);
2274 devid_free(devid);
2276 if (ret != 0)
2277 return (NULL);
2279 if ((path = strdup(list[0].devname)) == NULL)
2280 return (NULL);
2282 devid_free_nmlist(list);
2284 return (path);
2288 * Convert from a path to a devid string.
2290 static char *
2291 path_to_devid(const char *path)
2293 int fd;
2294 ddi_devid_t devid;
2295 char *minor, *ret;
2297 if ((fd = open(path, O_RDONLY)) < 0)
2298 return (NULL);
2300 minor = NULL;
2301 ret = NULL;
2302 if (devid_get(fd, &devid) == 0) {
2303 if (devid_get_minor_name(fd, &minor) == 0)
2304 ret = devid_str_encode(devid, minor);
2305 if (minor != NULL)
2306 devid_str_free(minor);
2307 devid_free(devid);
2309 (void) close(fd);
2311 return (ret);
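}

/*
 * A small sketch (illustrative only, not in the original source) of the
 * round trip the two helpers above make possible: check whether a stored
 * path still refers to the same physical device by re-deriving its devid.
 * The name "example_path_matches_devid" is hypothetical.
 */
static boolean_t
example_path_matches_devid(const char *path, const char *devid)
{
	char *cur = path_to_devid(path);
	boolean_t match;

	if (cur == NULL)
		return (B_FALSE);
	match = (strcmp(devid, cur) == 0);
	devid_str_free(cur);
	return (match);
}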
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged user
 * to type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2322 zfs_cmd_t zc = { 0 };
	/* strlcpy() guarantees NUL termination, unlike strncpy() */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
2326 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2327 &zc.zc_guid) == 0);
2329 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).
 * If the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev.  If 'zhp' is NULL, then this is an exported pool, and we don't need
 * to do any of these checks.
 */
2347 char *
2348 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2350 char *path, *devid;
2351 uint64_t value;
2352 char buf[64];
2353 vdev_stat_t *vs;
2354 uint_t vsc;
2356 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2357 &value) == 0) {
2358 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2359 &value) == 0);
2360 (void) snprintf(buf, sizeof (buf), "%llu",
2361 (u_longlong_t)value);
2362 path = buf;
2363 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2366 * If the device is dead (faulted, offline, etc) then don't
2367 * bother opening it. Otherwise we may be forcing the user to
2368 * open a misbehaving device, which can have undesirable
2369 * effects.
2371 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2372 (uint64_t **)&vs, &vsc) != 0 ||
2373 vs->vs_state >= VDEV_STATE_DEGRADED) &&
2374 zhp != NULL &&
2375 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2377 * Determine if the current path is correct.
2379 char *newdevid = path_to_devid(path);
2381 if (newdevid == NULL ||
2382 strcmp(devid, newdevid) != 0) {
2383 char *newpath;
2385 if ((newpath = devid_to_path(devid)) != NULL) {
2387 * Update the path appropriately.
2389 set_path(zhp, nv, newpath);
2390 if (nvlist_add_string(nv,
2391 ZPOOL_CONFIG_PATH, newpath) == 0)
2392 verify(nvlist_lookup_string(nv,
2393 ZPOOL_CONFIG_PATH,
2394 &path) == 0);
2395 free(newpath);
2399 if (newdevid)
2400 devid_str_free(newdevid);
2403 if (strncmp(path, "/dev/dsk/", 9) == 0)
2404 path += 9;
2406 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2407 &value) == 0 && value) {
2408 char *tmp = zfs_strdup(hdl, path);
2409 if (tmp == NULL)
2410 return (NULL);
2411 tmp[strlen(path) - 2] = '\0';
2412 return (tmp);
2414 } else {
2415 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2418 * If it's a raidz device, we need to stick in the parity level.
2420 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2421 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2422 &value) == 0);
2423 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
2424 (u_longlong_t)value);
2425 path = buf;
2429 return (zfs_strdup(hdl, path));
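}

/*
 * A usage sketch (illustrative, not part of the original file): print the
 * display name of each child of the root vdev, the way the iostat/status
 * output labels its rows.  "example_print_children" is a hypothetical name.
 */
static void
example_print_children(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	nvlist_t *config, *nvroot, **child;
	uint_t c, children;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		char *name = zpool_vdev_name(hdl, zhp, child[c]);

		if (name != NULL) {
			(void) printf("\t%s\n", name);
			free(name);
		}
	}
}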
2432 static int
2433 zbookmark_compare(const void *a, const void *b)
2435 return (memcmp(a, b, sizeof (zbookmark_t)));
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2445 zfs_cmd_t zc = { 0 };
2446 uint64_t count;
2447 zbookmark_t *zb = NULL;
2448 int i;
2451 * Retrieve the raw error list from the kernel. If the number of errors
2452 * has increased, allocate more space and continue until we get the
2453 * entire list.
2455 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2456 &count) == 0);
2457 if (count == 0)
2458 return (0);
2459 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2460 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2461 return (-1);
2462 zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2464 for (;;) {
2465 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2466 &zc) != 0) {
2467 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2468 if (errno == ENOMEM) {
2469 count = zc.zc_nvlist_dst_size;
2470 if ((zc.zc_nvlist_dst = (uintptr_t)
2471 zfs_alloc(zhp->zpool_hdl, count *
2472 sizeof (zbookmark_t))) == (uintptr_t)NULL)
2473 return (-1);
2474 } else {
2475 return (-1);
2477 } else {
2478 break;
2483 * Sort the resulting bookmarks. This is a little confusing due to the
2484 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
 * _not_ copied as part of the process.  So we point the start of our
 * array appropriately and decrement the total number of elements.
2489 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2490 zc.zc_nvlist_dst_size;
2491 count -= zc.zc_nvlist_dst_size;
2493 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2495 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
 * Fill in the nverrlistp with nvlists of dataset and object numbers.
2500 for (i = 0; i < count; i++) {
2501 nvlist_t *nv;
2503 /* ignoring zb_blkid and zb_level for now */
2504 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2505 zb[i-1].zb_object == zb[i].zb_object)
2506 continue;
2508 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2509 goto nomem;
2510 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2511 zb[i].zb_objset) != 0) {
2512 nvlist_free(nv);
2513 goto nomem;
2515 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2516 zb[i].zb_object) != 0) {
2517 nvlist_free(nv);
2518 goto nomem;
2520 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2521 nvlist_free(nv);
2522 goto nomem;
2524 nvlist_free(nv);
2527 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2528 return (0);
2530 nomem:
2531 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2532 return (no_memory(zhp->zpool_hdl));
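}

/*
 * A usage sketch (illustrative, not part of the original file): walk the
 * uniquified error list and print a path for each damaged object, roughly
 * what 'zpool status -v' does.  "example_print_errlog" is hypothetical.
 */
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;
		char pathname[MAXPATHLEN * 2];

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);

		/* zpool_obj_to_path() is defined later in this file */
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}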
/*
 * Upgrade a ZFS pool to the given on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2541 zfs_cmd_t zc = { 0 };
2542 libzfs_handle_t *hdl = zhp->zpool_hdl;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2545 zc.zc_cookie = new_version;
2547 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2548 return (zpool_standard_error_fmt(hdl, errno,
2549 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2550 zhp->zpool_name));
2551 return (0);
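}

/*
 * A one-line usage sketch (illustrative only): upgrade a pool to the newest
 * version this build understands, as 'zpool upgrade <pool>' does.
 * SPA_VERSION is the current on-disk version constant from sys/fs/zfs.h;
 * "example_upgrade_to_current" is a hypothetical name.
 */
static int
example_upgrade_to_current(zpool_handle_t *zhp)
{
	return (zpool_upgrade(zhp, SPA_VERSION));
}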
2554 void
2555 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2556 char *history_str)
2558 int i;
2560 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2561 for (i = 1; i < argc; i++) {
2562 if (strlen(history_str) + 1 + strlen(argv[i]) >
2563 HIS_MAX_RECORD_LEN)
2564 break;
2565 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2566 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2576 if (history_str == NULL)
2577 return (EINVAL);
2579 if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2580 return (EINVAL);
2582 if (hdl->libzfs_log_str != NULL)
2583 free(hdl->libzfs_log_str);
2585 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2586 return (no_memory(hdl));
2588 return (0);
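}

/*
 * A usage sketch (illustrative, not from the original file): how a command
 * front end might flatten its argv into a history string and stage it before
 * issuing the ioctl that will be logged.  "example_log_command" is a
 * hypothetical name.
 */
static int
example_log_command(libzfs_handle_t *hdl, int argc, char **argv)
{
	char history_str[HIS_MAX_RECORD_LEN];

	zpool_set_history_str("zpool", argc, argv, history_str);
	return (zpool_stage_history(hdl, history_str));
}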
/*
 * Perform an ioctl() to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual number of bytes read into 'buf'.
 */
2600 static int
2601 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2603 zfs_cmd_t zc = { 0 };
2604 libzfs_handle_t *hdl = zhp->zpool_hdl;
2606 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2608 zc.zc_history = (uint64_t)(uintptr_t)buf;
2609 zc.zc_history_len = *len;
2610 zc.zc_history_offset = *off;
2612 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2613 switch (errno) {
2614 case EPERM:
2615 return (zfs_error_fmt(hdl, EZFS_PERM,
2616 dgettext(TEXT_DOMAIN,
2617 "cannot show history for pool '%s'"),
2618 zhp->zpool_name));
2619 case ENOENT:
2620 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2621 dgettext(TEXT_DOMAIN, "cannot get history for pool "
2622 "'%s'"), zhp->zpool_name));
2623 case ENOTSUP:
2624 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2625 dgettext(TEXT_DOMAIN, "cannot get history for pool "
2626 "'%s', pool must be upgraded"), zhp->zpool_name));
2627 default:
2628 return (zpool_standard_error_fmt(hdl, errno,
2629 dgettext(TEXT_DOMAIN,
2630 "cannot get history for '%s'"), zhp->zpool_name));
2634 *len = zc.zc_history_len;
2635 *off = zc.zc_history_offset;
2637 return (0);
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of trailing bytes that
 * were not processed because they do not form a complete record.
 */
2645 static int
2646 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2647 nvlist_t ***records, uint_t *numrecords)
2649 uint64_t reclen;
2650 nvlist_t *nv;
2651 int i;
2653 while (bytes_read > sizeof (reclen)) {
2655 /* get length of packed record (stored as little endian) */
2656 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2657 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2659 if (bytes_read < sizeof (reclen) + reclen)
2660 break;
2662 /* unpack record */
2663 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2664 return (ENOMEM);
2665 bytes_read -= sizeof (reclen) + reclen;
2666 buf += sizeof (reclen) + reclen;
2668 /* add record to nvlist array */
2669 (*numrecords)++;
		/* grow the array by powers of two, checking the result */
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));

			if (tmp == NULL) {
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
2677 *leftover = bytes_read;
2678 return (0);
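}

/*
 * A sketch of the record framing consumed above (illustrative only): each
 * history record is a 64-bit little-endian length followed by that many
 * bytes of packed nvlist.  A writer would frame a record like this;
 * "example_frame_record" is a hypothetical name.
 */
static void
example_frame_record(uchar_t *buf, uint64_t reclen)
{
	int i;

	/* store the length little endian, one byte at a time */
	for (i = 0; i < sizeof (reclen); i++)
		buf[i] = (uchar_t)((reclen >> (8 * i)) & 0xff);

	/* the 'reclen' bytes of the packed nvlist follow this header */
}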
2681 #define HIS_BUF_LEN (128*1024)
/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2689 char buf[HIS_BUF_LEN];
2690 uint64_t off = 0;
2691 nvlist_t **records = NULL;
2692 uint_t numrecords = 0;
2693 int err, i;
2695 do {
2696 uint64_t bytes_read = sizeof (buf);
2697 uint64_t leftover;
2699 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2700 break;
2702 /* if nothing else was read in, we're at EOF, just return */
2703 if (!bytes_read)
2704 break;
2706 if ((err = zpool_history_unpack(buf, bytes_read,
2707 &leftover, &records, &numrecords)) != 0)
2708 break;
2709 off -= leftover;
2711 /* CONSTCOND */
2712 } while (1);
2714 if (!err) {
2715 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2716 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2717 records, numrecords) == 0);
2719 for (i = 0; i < numrecords; i++)
2720 nvlist_free(records[i]);
2721 free(records);
2723 return (err);
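}

/*
 * A usage sketch (illustrative, not part of the original file): fetch the
 * history and print the timestamp and command of each record, roughly what
 * 'zpool history' shows.  ZPOOL_HIST_TIME and ZPOOL_HIST_CMD come from
 * sys/fs/zfs.h; "example_print_history" is a hypothetical name.
 */
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;
	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);

	for (i = 0; i < numrecords; i++) {
		uint64_t tsec;
		char *cmd;

		if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
		    &tsec) == 0 &&
		    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%llu %s\n", (u_longlong_t)tsec, cmd);
	}
	nvlist_free(nvhis);
}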
2726 void
2727 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2728 char *pathname, size_t len)
2730 zfs_cmd_t zc = { 0 };
2731 boolean_t mounted = B_FALSE;
2732 char *mntpnt = NULL;
2733 char dsname[MAXNAMELEN];
2735 if (dsobj == 0) {
2736 /* special case for the MOS */
2737 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
2738 return;
2741 /* get the dataset's name */
2742 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2743 zc.zc_obj = dsobj;
2744 if (ioctl(zhp->zpool_hdl->libzfs_fd,
2745 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2746 /* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
2749 return;
2751 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2753 /* find out if the dataset is mounted */
2754 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2756 /* get the corrupted object's path */
2757 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2758 zc.zc_obj = obj;
2759 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2760 &zc) == 0) {
2761 if (mounted) {
2762 (void) snprintf(pathname, len, "%s%s", mntpnt,
2763 zc.zc_value);
2764 } else {
2765 (void) snprintf(pathname, len, "%s:%s",
2766 dsname, zc.zc_value);
2768 } else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
2771 free(mntpnt);
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128K, so start there instead.
 */
#define	NEW_START_BLOCK	256
/*
 * Read the EFI label from the config; if a label does not exist, pass
 * the error back to the caller.  If the caller has passed a non-NULL
 * diskaddr argument, we set it to the starting address of the EFI
 * partition.
 */
2788 static int
2789 read_efi_label(nvlist_t *config, diskaddr_t *sb)
2791 char *path;
2792 int fd;
2793 char diskname[MAXPATHLEN];
2794 int err = -1;
2796 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2797 return (err);
2799 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2800 strrchr(path, '/'));
2801 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2802 struct dk_gpt *vtoc;
2804 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
2805 if (sb != NULL)
2806 *sb = vtoc->efi_parts[0].p_start;
2807 efi_free(vtoc);
2809 (void) close(fd);
2811 return (err);
/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
2818 static diskaddr_t
2819 find_start_block(nvlist_t *config)
2821 nvlist_t **child;
2822 uint_t c, children;
2823 diskaddr_t sb = MAXOFFSET_T;
2824 uint64_t wholedisk;
2826 if (nvlist_lookup_nvlist_array(config,
2827 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2828 if (nvlist_lookup_uint64(config,
2829 ZPOOL_CONFIG_WHOLE_DISK,
2830 &wholedisk) != 0 || !wholedisk) {
2831 return (MAXOFFSET_T);
2833 if (read_efi_label(config, &sb) < 0)
2834 sb = MAXOFFSET_T;
2835 return (sb);
2838 for (c = 0; c < children; c++) {
2839 sb = find_start_block(child[c]);
2840 if (sb != MAXOFFSET_T) {
2841 return (sb);
2844 return (MAXOFFSET_T);
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2854 char path[MAXPATHLEN];
2855 struct dk_gpt *vtoc;
2856 int fd;
2857 size_t resv = EFI_MIN_RESV_SIZE;
2858 uint64_t slice_size;
2859 diskaddr_t start_block;
2860 char errbuf[1024];
2862 /* prepare an error message just in case */
2863 (void) snprintf(errbuf, sizeof (errbuf),
2864 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
2866 if (zhp) {
2867 nvlist_t *nvroot;
2869 if (pool_is_bootable(zhp)) {
2870 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2871 "EFI labeled devices are not supported on root "
2872 "pools."));
2873 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
2876 verify(nvlist_lookup_nvlist(zhp->zpool_config,
2877 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2879 if (zhp->zpool_start_block == 0)
2880 start_block = find_start_block(nvroot);
2881 else
2882 start_block = zhp->zpool_start_block;
2883 zhp->zpool_start_block = start_block;
2884 } else {
2885 /* new pool */
2886 start_block = NEW_START_BLOCK;
2889 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2890 BACKUP_SLICE);
2892 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2894 * This shouldn't happen. We've long since verified that this
2895 * is a valid device.
2897 zfs_error_aux(hdl,
2898 dgettext(TEXT_DOMAIN, "unable to open device"));
2899 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2902 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or
		 * we were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
2914 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2917 slice_size = vtoc->efi_last_u_lba + 1;
2918 slice_size -= EFI_MIN_RESV_SIZE;
2919 if (start_block == MAXOFFSET_T)
2920 start_block = NEW_START_BLOCK;
2921 slice_size -= start_block;
2923 vtoc->efi_parts[0].p_start = start_block;
2924 vtoc->efi_parts[0].p_size = slice_size;
2927 * Why we use V_USR: V_BACKUP confuses users, and is considered
2928 * disposable by some EFI utilities (since EFI doesn't have a backup
2929 * slice). V_UNASSIGNED is supposed to be used only for zero size
2930 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
2931 * etc. were all pretty specific. V_USR is as close to reality as we
2932 * can get, in the absence of V_OTHER.
2934 vtoc->efi_parts[0].p_tag = V_USR;
2935 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2937 vtoc->efi_parts[8].p_start = slice_size + start_block;
2938 vtoc->efi_parts[8].p_size = resv;
2939 vtoc->efi_parts[8].p_tag = V_RESERVED;
2941 if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI GPT
		 * labels.  Print out a helpful error message directing the
		 * user to manually label the disk and provide a specific
		 * slice.
		 */
2948 (void) close(fd);
2949 efi_free(vtoc);
2951 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2952 "try using fdisk(1M) and then provide a specific slice"));
2953 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2956 (void) close(fd);
2957 efi_free(vtoc);
2958 return (0);
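}

/*
 * A usage sketch (illustrative, not part of the original file): label a
 * whole disk and derive the slice name that would then be handed to the
 * pool-creation code ("c1t0d0" becomes "c1t0d0s0").  The helper name
 * "example_label_whole_disk" is hypothetical.
 */
static int
example_label_whole_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp,
    char *name, char *buf, size_t buflen)
{
	if (zpool_label_disk(hdl, zhp, name) != 0)
		return (-1);

	/* after labeling, the usable data partition is slice 0 */
	(void) snprintf(buf, buflen, "%ss0", name);
	return (0);
}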
2961 static boolean_t
2962 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
2964 char *type;
2965 nvlist_t **child;
2966 uint_t children, c;
2968 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
2969 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2970 strcmp(type, VDEV_TYPE_FILE) == 0 ||
2971 strcmp(type, VDEV_TYPE_LOG) == 0 ||
2972 strcmp(type, VDEV_TYPE_MISSING) == 0) {
2973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2974 "vdev type '%s' is not supported"), type);
2975 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
2976 return (B_FALSE);
2978 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
2979 &child, &children) == 0) {
2980 for (c = 0; c < children; c++) {
2981 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
2982 return (B_FALSE);
2985 return (B_TRUE);
/*
 * Check whether this zvol is allowable for use as a dump device; returns
 * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol at all.
 */
int
zvol_check_dump_config(char *arg)
2995 zpool_handle_t *zhp = NULL;
2996 nvlist_t *config, *nvroot;
2997 char *p, *volname;
2998 nvlist_t **top;
2999 uint_t toplevels;
3000 libzfs_handle_t *hdl;
3001 char errbuf[1024];
3002 char poolname[ZPOOL_MAXNAMELEN];
3003 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3004 int ret = 1;
3006 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3007 return (-1);
3010 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3011 "dump is not supported on device '%s'"), arg);
3013 if ((hdl = libzfs_init()) == NULL)
3014 return (1);
3015 libzfs_print_on_error(hdl, B_TRUE);
3017 volname = arg + pathlen;
3019 /* check the configuration of the pool */
3020 if ((p = strchr(volname, '/')) == NULL) {
3021 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3022 "malformed dataset name"));
3023 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;	/* ret is 1; don't leak the libzfs handle */
3025 } else if (p - volname >= ZFS_MAXNAMELEN) {
3026 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3027 "dataset name is too long"));
3028 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;	/* ret is 1; don't leak the libzfs handle */
3030 } else {
3031 (void) strncpy(poolname, volname, p - volname);
3032 poolname[p - volname] = '\0';
3035 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3036 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3037 "could not open pool '%s'"), poolname);
3038 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3039 goto out;
3041 config = zpool_get_config(zhp, NULL);
3042 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3043 &nvroot) != 0) {
3044 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3045 "could not obtain vdev configuration for '%s'"), poolname);
3046 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3047 goto out;
3050 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3051 &top, &toplevels) == 0);
3052 if (toplevels != 1) {
3053 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3054 "'%s' has multiple top level vdevs"), poolname);
3055 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3056 goto out;
3059 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3060 goto out;
3062 ret = 0;
3064 out:
3065 if (zhp)
3066 zpool_close(zhp);
3067 libzfs_fini(hdl);
3068 return (ret);
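}

/*
 * A closing usage sketch (illustrative only): how a dump-administration
 * front end might interpret the return codes documented above.  The helper
 * name "example_vet_dump_device" is hypothetical.
 */
static int
example_vet_dump_device(char *arg)
{
	int err = zvol_check_dump_config(arg);

	if (err < 0)
		return (0);	/* not a zvol; some other code must vet it */
	return (err);		/* 0 = usable as a dump device, > 0 = not */
}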