module/zfs/vdev.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, Datto Inc. All rights reserved.
 * Copyright (c) 2021, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
#include "zfs_prop.h"
/*
 * One metaslab from each (normal-class) vdev is used by the ZIL. These are
 * called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
 * part of the spa_embedded_log_class. The metaslab with the most free space
 * in each vdev is selected for this purpose when the pool is opened (or a
 * vdev is added). See vdev_metaslab_init().
 *
 * Log blocks can be allocated from the following locations. Each one is tried
 * in order until the allocation succeeds:
 * 1. dedicated log vdevs, aka "slog" (spa_log_class)
 * 2. embedded slog metaslabs (spa_embedded_log_class)
 * 3. other metaslabs in normal vdevs (spa_normal_class)
 *
 * zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
 * than this number of metaslabs in the vdev. This ensures that we don't set
 * aside an unreasonable amount of space for the ZIL. If set to less than
 * 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
 * (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
 */
static uint_t zfs_embedded_slog_min_ms = 64;
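
/*
 * Illustrative note (not part of the original source): on a Linux build,
 * where this tunable is registered as a module parameter, the cutoff can
 * be adjusted at runtime, e.g.:
 *
 *	echo 128 > /sys/module/zfs/parameters/zfs_embedded_slog_min_ms
 *
 * Vdevs with fewer metaslabs than this keep them all in the normal class,
 * and ZIL blocks then come from locations 1 and 3 above only.
 */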
/* default target for number of metaslabs per top-level vdev */
static uint_t zfs_vdev_default_ms_count = 200;

/* minimum number of metaslabs per top-level vdev */
static uint_t zfs_vdev_min_ms_count = 16;

/* practical upper limit of total metaslabs per top-level vdev */
static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;

/* lower limit for metaslab size (512M) */
static uint_t zfs_vdev_default_ms_shift = 29;

/* upper limit for metaslab size (16G) */
static uint_t zfs_vdev_max_ms_shift = 34;

int vdev_validate_skip = B_FALSE;

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
int zfs_vdev_dtl_sm_blksz = (1 << 12);

/*
 * Rate limit slow IO (delay) events to this many per second.
 */
static unsigned int zfs_slow_io_events_per_second = 20;

/*
 * Rate limit checksum events after this many checksum errors per second.
 */
static unsigned int zfs_checksum_events_per_second = 20;

/*
 * Ignore errors during scrub/resilver. This allows one to work around a
 * failing resilver upon import when there are pool errors.
 */
static int zfs_scan_ignore_errors = 0;

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
int zfs_vdev_standard_sm_blksz = (1 << 17);

/*
 * Tunable parameter for debugging or performance analysis. Setting this
 * will cause pool corruption on power loss if a volatile out-of-order
 * write cache is enabled.
 */
int zfs_nocacheflush = 0;

/*
 * Maximum and minimum ashift values that can be automatically set based on
 * vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX
 * is higher than the maximum value, it is intentionally limited here to not
 * excessively impact pool space efficiency. Higher ashift values may still
 * be forced by vdev logical ashift or by user via ashift property, but won't
 * be set automatically as a performance optimization.
 */
uint_t zfs_vdev_max_auto_ashift = 14;
uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
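
/*
 * Illustrative example (not part of the original source): assuming a
 * Linux build where these are exposed as module parameters, the automatic
 * ashift window can be inspected and narrowed from userland:
 *
 *	cat /sys/module/zfs/parameters/zfs_vdev_max_auto_ashift
 *	echo 12 > /sys/module/zfs/parameters/zfs_vdev_min_auto_ashift
 *
 * With a minimum of 12, newly added vdevs use at least 4K allocation
 * units even when a disk reports 512-byte physical sectors.
 */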
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	if (vd->vdev_path != NULL) {
		zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
		    vd->vdev_path, buf);
	} else {
		zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
		    vd->vdev_ops->vdev_op_type,
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)vd->vdev_guid, buf);
	}
}
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
	char state[20];

	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
		zfs_dbgmsg("%*svdev %llu: %s", indent, "",
		    (u_longlong_t)vd->vdev_id,
		    vd->vdev_ops->vdev_op_type);
		return;
	}

	switch (vd->vdev_state) {
	case VDEV_STATE_UNKNOWN:
		(void) snprintf(state, sizeof (state), "unknown");
		break;
	case VDEV_STATE_CLOSED:
		(void) snprintf(state, sizeof (state), "closed");
		break;
	case VDEV_STATE_OFFLINE:
		(void) snprintf(state, sizeof (state), "offline");
		break;
	case VDEV_STATE_REMOVED:
		(void) snprintf(state, sizeof (state), "removed");
		break;
	case VDEV_STATE_CANT_OPEN:
		(void) snprintf(state, sizeof (state), "can't open");
		break;
	case VDEV_STATE_FAULTED:
		(void) snprintf(state, sizeof (state), "faulted");
		break;
	case VDEV_STATE_DEGRADED:
		(void) snprintf(state, sizeof (state), "degraded");
		break;
	case VDEV_STATE_HEALTHY:
		(void) snprintf(state, sizeof (state), "healthy");
		break;
	default:
		(void) snprintf(state, sizeof (state), "<state %u>",
		    (uint_t)vd->vdev_state);
	}

	zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
	    "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
	    vd->vdev_islog ? " (log)" : "",
	    (u_longlong_t)vd->vdev_guid,
	    vd->vdev_path ? vd->vdev_path : "N/A", state);

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
/*
 * Virtual device management.
 */

static vdev_ops_t *const vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_draid_ops,
	&vdev_draid_spare_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	&vdev_indirect_ops,
	NULL
};

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, *const *opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}
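
/*
 * Illustrative usage (not part of the original source): callers such as
 * vdev_alloc() below map the nvlist "type" string to an ops vector and
 * treat NULL as an invalid type:
 *
 *	vdev_ops_t *ops = vdev_getops(type);
 *	if (ops == NULL)
 *		return (SET_ERROR(EINVAL));
 */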
/*
 * Given a vdev and a metaslab class, find which metaslab group we're
 * interested in. All vdevs may belong to two different metaslab classes.
 * Dedicated slog devices use only the primary metaslab group, rather than a
 * separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
 */
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
	if (mc == spa_embedded_log_class(vd->vdev_spa) &&
	    vd->vdev_log_mg != NULL)
		return (vd->vdev_log_mg);
	else
		return (vd->vdev_mg);
}

void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
    range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
	(void) vd, (void) remain_rs;

	physical_rs->rs_start = logical_rs->rs_start;
	physical_rs->rs_end = logical_rs->rs_end;
}

/*
 * Derive the enumerated allocation bias from string input.
 * String origin is either the per-vdev zap or zpool(8).
 */
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
	vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;

	if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
		alloc_bias = VDEV_BIAS_LOG;
	else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
		alloc_bias = VDEV_BIAS_SPECIAL;
	else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
		alloc_bias = VDEV_BIAS_DEDUP;

	return (alloc_bias);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children. This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}
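
/*
 * Worked example (not part of the original source): for a mirror whose
 * top-level vdev has ashift = 12, a psize of 6000 bytes is rounded up to
 * P2ROUNDUP(6000, 4096) = 8192, and the result is the maximum of that
 * value and the asize reported by each child.
 */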
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
	return (vd->vdev_min_asize);
}

/*
 * Get the minimum allocatable size. We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab. This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}
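
/*
 * Worked example (not part of the original source): with 16 GiB
 * metaslabs (vdev_ms_shift = 34), a top-level asize of 100 GiB aligns
 * down to P2ALIGN(100 GiB, 16 GiB) = 96 GiB, i.e. six whole metaslabs.
 * A replacement device therefore only needs to cover those 96 GiB, not
 * the exact original size.
 */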
void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

/*
 * Get the minimal allocation size for the top-level vdev.
 */
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
	uint64_t min_alloc = 1ULL << vd->vdev_ashift;

	if (vd->vdev_ops->vdev_op_min_alloc != NULL)
		min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);

	return (min_alloc);
}

/*
 * Get the parity level for a top-level vdev.
 */
uint64_t
vdev_get_nparity(vdev_t *vd)
{
	uint64_t nparity = 0;

	if (vd->vdev_ops->vdev_op_nparity != NULL)
		nparity = vd->vdev_ops->vdev_op_nparity(vd);

	return (nparity);
}

static int
vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	uint64_t objid;
	int err;

	if (vd->vdev_root_zap != 0) {
		objid = vd->vdev_root_zap;
	} else if (vd->vdev_top_zap != 0) {
		objid = vd->vdev_top_zap;
	} else if (vd->vdev_leaf_zap != 0) {
		objid = vd->vdev_leaf_zap;
	} else {
		return (EINVAL);
	}

	err = zap_lookup(mos, objid, vdev_prop_to_name(prop),
	    sizeof (uint64_t), 1, value);

	if (err == ENOENT)
		*value = vdev_prop_default_numeric(prop);

	return (err);
}
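
/*
 * Illustrative usage (not part of the original source): reading a vdev
 * property with the default filled in on ENOENT, which callers treat as
 * "property not explicitly set":
 *
 *	uint64_t n;
 *	int err = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N, &n);
 *	if (err != 0 && err != ENOENT)
 *		return (err);
 */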
/*
 * Get the number of data disks for a top-level vdev.
 */
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
	uint64_t ndisks = 1;

	if (vd->vdev_ops->vdev_op_ndisks != NULL)
		ndisks = vd->vdev_ops->vdev_op_ndisks(vd);

	return (ndisks);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

static int
vdev_count_leaves_impl(vdev_t *vd)
{
	int n = 0;

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (int c = 0; c < vd->vdev_children; c++)
		n += vdev_count_leaves_impl(vd->vdev_child[c]);

	return (n);
}

int
vdev_count_leaves(spa_t *spa)
{
	int rc;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	rc = vdev_count_leaves_impl(spa->spa_root_vdev);
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (rc);
}
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_alloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		memcpy(newchild, pvd->vdev_child, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf) {
		list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
		cvd->vdev_spa->spa_leaf_list_gen++;
	}
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	if (cvd->vdev_ops->vdev_op_leaf) {
		spa_t *spa = cvd->vdev_spa;
		list_remove(&spa->spa_leaf_list, cvd);
		spa->spa_leaf_list_gen++;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (oldc == 0)
		return;

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	if (newc > 0) {
		newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);

		for (int c = newc = 0; c < oldc; c++) {
			if ((cvd = pvd->vdev_child[c]) != NULL) {
				newchild[newc] = cvd;
				cvd->vdev_id = newc++;
			}
		}
	} else {
		newchild = NULL;
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}
/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;
	vdev_indirect_config_t *vic;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
	vic = &vd->vdev_indirect_config;

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
		spa->spa_load_guid = spa_generate_guid(NULL);
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			guid = spa_generate_guid(NULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			guid = spa_generate_guid(spa);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);
	vic->vic_prev_indirect_vdev = UINT64_MAX;

	rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
	vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
	    0, 0);

	/*
	 * Initialize rate limit structs for events. We rate limit ZIO delay
	 * and checksum events so that we don't overwhelm ZED with thousands
	 * of events when a disk is acting up.
	 */
	zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
	    1);
	zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
	    1);
	zfs_ratelimit_init(&vd->vdev_checksum_rl,
	    &zfs_checksum_events_per_second, 1);

	/*
	 * Default Thresholds for tuning ZED
	 */
	vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N);
	vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T);
	vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N);
	vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T);

	list_link_init(&vd->vdev_config_dirty_node);
	list_link_init(&vd->vdev_state_dirty_node);
	list_link_init(&vd->vdev_initialize_node);
	list_link_init(&vd->vdev_leaf_node);
	list_link_init(&vd->vdev_trim_node);

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < DTL_TYPES; t++) {
		vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
		    0);
	}

	txg_list_create(&vd->vdev_ms_list, spa,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list, spa,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}
/*
 * Allocate a new vdev. The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	const char *type;
	uint64_t guid = 0, islog;
	vdev_t *vd;
	vdev_indirect_config_t *vic;
	const char *tmp = NULL;
	int rc;
	vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
	boolean_t top_level = (parent && !parent->vdev_parent);

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (SET_ERROR(EINVAL));

	if ((ops = vdev_getops(type)) == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (SET_ERROR(ENOTSUP));

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (SET_ERROR(ENOTSUP));

	if (top_level && alloctype == VDEV_ALLOC_ADD) {
		const char *bias;

		/*
		 * If creating a top-level vdev, check for allocation
		 * classes input.
		 */
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
		    &bias) == 0) {
			alloc_bias = vdev_derive_alloc_bias(bias);

			/* spa_vdev_add() expects feature to be enabled */
			if (spa->spa_load_state != SPA_LOAD_CREATE &&
			    !spa_feature_is_enabled(spa,
			    SPA_FEATURE_ALLOCATION_CLASSES)) {
				return (SET_ERROR(ENOTSUP));
			}
		}

		/* spa_vdev_add() expects feature to be enabled */
		if (ops == &vdev_draid_ops &&
		    spa->spa_load_state != SPA_LOAD_CREATE &&
		    !spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
			return (SET_ERROR(ENOTSUP));
		}
	}

	/*
	 * Initialize the vdev specific data. This is done before calling
	 * vdev_alloc_common() since it may fail and this simplifies the
	 * error reporting and cleanup code paths.
	 */
	void *tsd = NULL;
	if (ops->vdev_op_init != NULL) {
		rc = ops->vdev_op_init(spa, nv, &tsd);
		if (rc != 0) {
			return (rc);
		}
	}

	vd = vdev_alloc_common(spa, id, guid, ops);
	vd->vdev_tsd = tsd;
	vd->vdev_islog = islog;

	if (top_level && alloc_bias != VDEV_BIAS_NONE)
		vd->vdev_alloc_bias = alloc_bias;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0)
		vd->vdev_path = spa_strdup(tmp);

	/*
	 * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
	 * fault on a vdev and want it to persist across imports (like with
	 * zpool offline -f).
	 */
	rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
	if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
		vd->vdev_faulted = 1;
		vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
	}

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0)
		vd->vdev_devid = spa_strdup(tmp);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0)
		vd->vdev_physpath = spa_strdup(tmp);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &tmp) == 0)
		vd->vdev_enc_sysfs_path = spa_strdup(tmp);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0)
		vd->vdev_fru = spa_strdup(tmp);

	/*
	 * Set the whole_disk property. If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	vic = &vd->vdev_indirect_config;

	ASSERT0(vic->vic_mapping_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
	    &vic->vic_mapping_object);
	ASSERT0(vic->vic_births_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
	    &vic->vic_births_object);
	ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
	    &vic->vic_prev_indirect_vdev);

	/*
	 * Look for the 'not present' flag. This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	if (vd->vdev_ops == &vdev_root_ops &&
	    (alloctype == VDEV_ALLOC_LOAD ||
	    alloctype == VDEV_ALLOC_SPLIT ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
		    &vd->vdev_root_zap);
	}

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (top_level &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
		    &vd->vdev_noalloc);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
		    &vd->vdev_removing);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
		    &vd->vdev_top_zap);
	} else {
		ASSERT0(vd->vdev_top_zap);
	}

	if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
		    alloctype == VDEV_ALLOC_ADD ||
		    alloctype == VDEV_ALLOC_SPLIT ||
		    alloctype == VDEV_ALLOC_ROOTPOOL);
		/* Note: metaslab_group_create() is now deferred */
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv,
		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
	} else {
		ASSERT0(vd->vdev_leaf_zap);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
		    &vd->vdev_resilver_txg);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
		    &vd->vdev_rebuild_txg);

		if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
			vdev_defer_resilver(vd);

		/*
		 * In general, when importing a pool we want to ignore the
		 * persistent fault state, as the diagnosis made on another
		 * system may not be valid in the current context. The only
		 * exception is if we forced a vdev to a persistently faulted
		 * state with 'zpool offline -f'. The persistent fault will
		 * remain across imports until cleared.
		 *
		 * Local vdevs will remain in the faulted state.
		 */
		if (spa_load_state(spa) == SPA_LOAD_OPEN ||
		    spa_load_state(spa) == SPA_LOAD_IMPORT) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);

			if (vd->vdev_faulted || vd->vdev_degraded) {
				const char *aux;

				vd->vdev_label_aux =
				    VDEV_AUX_ERR_EXCEEDED;
				if (nvlist_lookup_string(nv,
				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
				    strcmp(aux, "external") == 0)
					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
				else
					vd->vdev_faulted = 0ULL;
			}
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}
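
/*
 * Illustrative sketch (not part of the original source): the nvlist
 * consumed by vdev_alloc() is normally built from an on-disk config or
 * by zpool(8). A minimal hand-built leaf config could look like this,
 * using the fnvlist_* convenience wrappers:
 *
 *	nvlist_t *nv = fnvlist_alloc();
 *	fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE);
 *	fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, "/var/tmp/vdev0");
 *	fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, 12);
 *
 *	vdev_t *vd;
 *	int err = vdev_alloc(spa, &vd, nv, parent, 0, VDEV_ALLOC_ADD);
 */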
void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT3P(vd->vdev_trim_thread, ==, NULL);
	ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);

	/*
	 * Scan queues are normally destroyed at the end of a scan. If the
	 * queue exists here, that implies the vdev is being removed while
	 * the scan is still running.
	 */
	if (vd->vdev_scan_io_queue != NULL) {
		mutex_enter(&vd->vdev_scan_io_queue_lock);
		dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
		vd->vdev_scan_io_queue = NULL;
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * vdev_free() implies closing the vdev first. This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	if (vd->vdev_ops->vdev_op_fini != NULL)
		vd->vdev_ops->vdev_op_fini(vd);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
		vd->vdev_mg = NULL;
	}
	if (vd->vdev_log_mg != NULL) {
		ASSERT0(vd->vdev_ms_count);
		metaslab_group_destroy(vd->vdev_log_mg);
		vd->vdev_log_mg = NULL;
	}

	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);
	ASSERT(!list_link_active(&vd->vdev_leaf_node));

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);

	if (vd->vdev_enc_sysfs_path)
		spa_strfree(vd->vdev_enc_sysfs_path);

	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_close(vd->vdev_dtl_sm);
	for (int t = 0; t < DTL_TYPES; t++) {
		range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
		range_tree_destroy(vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	EQUIV(vd->vdev_indirect_births != NULL,
	    vd->vdev_indirect_mapping != NULL);
	if (vd->vdev_indirect_births != NULL) {
		vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
		vdev_indirect_births_close(vd->vdev_indirect_births);
	}

	if (vd->vdev_obsolete_sm != NULL) {
		ASSERT(vd->vdev_removing ||
		    vd->vdev_ops == &vdev_indirect_ops);
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
	}
	range_tree_destroy(vd->vdev_obsolete_segments);
	rw_destroy(&vd->vdev_indirect_rwlock);
	mutex_destroy(&vd->vdev_obsolete_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);
	mutex_destroy(&vd->vdev_scan_io_queue_lock);

	mutex_destroy(&vd->vdev_initialize_lock);
	mutex_destroy(&vd->vdev_initialize_io_lock);
	cv_destroy(&vd->vdev_initialize_io_cv);
	cv_destroy(&vd->vdev_initialize_cv);

	mutex_destroy(&vd->vdev_trim_lock);
	mutex_destroy(&vd->vdev_autotrim_lock);
	mutex_destroy(&vd->vdev_trim_io_lock);
	cv_destroy(&vd->vdev_trim_cv);
	cv_destroy(&vd->vdev_autotrim_cv);
	cv_destroy(&vd->vdev_autotrim_kick_cv);
	cv_destroy(&vd->vdev_trim_io_cv);

	mutex_destroy(&vd->vdev_rebuild_lock);
	cv_destroy(&vd->vdev_rebuild_cv);

	zfs_ratelimit_fini(&vd->vdev_delay_rl);
	zfs_ratelimit_fini(&vd->vdev_deadman_rl);
	zfs_ratelimit_fini(&vd->vdev_checksum_rl);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}
/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;
	tvd->vdev_top_zap = svd->vdev_top_zap;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;
	svd->vdev_top_zap = 0;

	if (tvd->vdev_mg)
		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
	if (tvd->vdev_log_mg)
		ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_log_mg = svd->vdev_log_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_log_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;
	if (tvd->vdev_log_mg != NULL)
		tvd->vdev_log_mg->mg_vd = tvd;

	tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
	svd->vdev_checkpoint_sm = NULL;

	tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
	svd->vdev_alloc_bias = VDEV_BIAS_NONE;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	/*
	 * State which may be set on a top-level vdev that's in the
	 * process of being removed.
	 */
	ASSERT0(tvd->vdev_indirect_config.vic_births_object);
	ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
	ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
	ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
	ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
	ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
	ASSERT0(tvd->vdev_noalloc);
	ASSERT0(tvd->vdev_removing);
	ASSERT0(tvd->vdev_rebuilding);
	tvd->vdev_noalloc = svd->vdev_noalloc;
	tvd->vdev_removing = svd->vdev_removing;
	tvd->vdev_rebuilding = svd->vdev_rebuilding;
	tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
	tvd->vdev_indirect_config = svd->vdev_indirect_config;
	tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
	tvd->vdev_indirect_births = svd->vdev_indirect_births;
	range_tree_swap(&svd->vdev_obsolete_segments,
	    &tvd->vdev_obsolete_segments);
	tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
	svd->vdev_indirect_config.vic_mapping_object = 0;
	svd->vdev_indirect_config.vic_births_object = 0;
	svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
	svd->vdev_indirect_mapping = NULL;
	svd->vdev_indirect_births = NULL;
	svd->vdev_obsolete_sm = NULL;
	svd->vdev_noalloc = 0;
	svd->vdev_removing = 0;
	svd->vdev_rebuilding = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;

	dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev. There is no need to
 * call .vdev_op_init() since mirror/replacing vdevs do not have private state.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_max_asize = cvd->vdev_max_asize;
	mvd->vdev_psize = cvd->vdev_psize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
	mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;
	cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
	cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_orig_guid = cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;

		/*
		 * If pool not set for autoexpand, we need to also preserve
		 * mvd's asize to prevent automatic expansion of cvd.
		 * Otherwise if we are adjusting the mirror by attaching and
		 * detaching children of non-uniform sizes, the mirror could
		 * autoexpand, unexpectedly requiring larger devices to
		 * re-establish the mirror.
		 */
		if (!cvd->vdev_spa->spa_autoexpand)
			cvd->vdev_asize = mvd->vdev_asize;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}
void
vdev_metaslab_group_create(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * metaslab_group_create was delayed until allocation bias was available
	 */
	if (vd->vdev_mg == NULL) {
		metaslab_class_t *mc;

		if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
			vd->vdev_alloc_bias = VDEV_BIAS_LOG;

		ASSERT3U(vd->vdev_islog, ==,
		    (vd->vdev_alloc_bias == VDEV_BIAS_LOG));

		switch (vd->vdev_alloc_bias) {
		case VDEV_BIAS_LOG:
			mc = spa_log_class(spa);
			break;
		case VDEV_BIAS_SPECIAL:
			mc = spa_special_class(spa);
			break;
		case VDEV_BIAS_DEDUP:
			mc = spa_dedup_class(spa);
			break;
		default:
			mc = spa_normal_class(spa);
		}

		vd->vdev_mg = metaslab_group_create(mc, vd,
		    spa->spa_alloc_count);

		if (!vd->vdev_islog) {
			vd->vdev_log_mg = metaslab_group_create(
			    spa_embedded_log_class(spa), vd, 1);
		}

		/*
		 * The spa ashift min/max only apply for the normal metaslab
		 * class. Class destination is late binding so ashift boundary
		 * setting had to wait until now.
		 */
		if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
		    mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
			if (vd->vdev_ashift > spa->spa_max_ashift)
				spa->spa_max_ashift = vd->vdev_ashift;
			if (vd->vdev_ashift < spa->spa_min_ashift)
				spa->spa_min_ashift = vd->vdev_ashift;

			uint64_t min_alloc = vdev_get_min_alloc(vd);
			if (min_alloc < spa->spa_min_alloc)
				spa->spa_min_alloc = min_alloc;
		}
	}
}
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;
	boolean_t expanding = (oldc != 0);

	ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	ASSERT(oldc <= newc);

	mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (expanding) {
		memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
		vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (uint64_t m = oldc; m < newc; m++) {
		uint64_t object = 0;
		/*
		 * vdev_ms_array may be 0 if we are creating the "fake"
		 * metaslabs for an indirect vdev for zdb's leak detection.
		 * See zdb_leak_init().
		 */
		if (txg == 0 && vd->vdev_ms_array != 0) {
			error = dmu_read(spa->spa_meta_objset,
			    vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error != 0) {
				vdev_dbgmsg(vd, "unable to read the metaslab "
				    "array [error=%d]", error);
				return (error);
			}
		}

		error = metaslab_init(vd->vdev_mg, m, object, txg,
		    &(vd->vdev_ms[m]));
		if (error != 0) {
			vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
			    error);
			return (error);
		}
	}

	/*
	 * Find the emptiest metaslab on the vdev and mark it for use for
	 * embedded slog by moving it from the regular to the log metaslab
	 * group.
	 */
	if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
	    vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
	    avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
		uint64_t slog_msid = 0;
		uint64_t smallest = UINT64_MAX;

		/*
		 * Note, we only search the new metaslabs, because the old
		 * (pre-existing) ones may be active (e.g. have non-empty
		 * range_tree's), and we don't move them to the new
		 * metaslab_t.
		 */
		for (uint64_t m = oldc; m < newc; m++) {
			uint64_t alloc =
			    space_map_allocated(vd->vdev_ms[m]->ms_sm);
			if (alloc < smallest) {
				slog_msid = m;
				smallest = alloc;
			}
		}
		metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
		/*
		 * The metaslab was marked as dirty at the end of
		 * metaslab_init(). Remove it from the dirty list so that we
		 * can uninitialize and reinitialize it to the new class.
		 */
		if (txg != 0) {
			(void) txg_list_remove_this(&vd->vdev_ms_list,
			    slog_ms, txg);
		}
		uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
		metaslab_fini(slog_ms);
		VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
		    &vd->vdev_ms[slog_msid]));
	}

	if (txg == 0)
		spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

	/*
	 * If the vdev is marked as non-allocating then don't
	 * activate the metaslabs since we want to ensure that
	 * no allocations are performed on this device.
	 */
	if (vd->vdev_noalloc) {
		/* track non-allocating vdev space */
		spa->spa_nonallocating_dspace += spa_deflate(spa) ?
		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
	} else if (!expanding) {
		metaslab_group_activate(vd->vdev_mg);
		if (vd->vdev_log_mg != NULL)
			metaslab_group_activate(vd->vdev_log_mg);
	}

	if (txg == 0)
		spa_config_exit(spa, SCL_ALLOC, FTAG);

	return (0);
}
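
/*
 * Worked example (not part of the original source): a freshly added
 * 1 TiB top-level vdev with vdev_ms_shift = 34 gets
 * newc = 2^40 >> 34 = 64 metaslabs. Since 64 is not greater than the
 * default zfs_embedded_slog_min_ms of 64, no metaslab is diverted to
 * the embedded slog group on such a vdev.
 */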
void
vdev_metaslab_fini(vdev_t *vd)
{
	if (vd->vdev_checkpoint_sm != NULL) {
		ASSERT(spa_feature_is_active(vd->vdev_spa,
		    SPA_FEATURE_POOL_CHECKPOINT));
		space_map_close(vd->vdev_checkpoint_sm);
		/*
		 * Even though we close the space map, we need to set its
		 * pointer to NULL. The reason is that vdev_metaslab_fini()
		 * may be called multiple times for certain operations
		 * (i.e. when destroying a pool) so we need to ensure that
		 * this clause never executes twice. This logic is similar
		 * to the one used for the vdev_ms clause below.
		 */
		vd->vdev_checkpoint_sm = NULL;
	}

	if (vd->vdev_ms != NULL) {
		metaslab_group_t *mg = vd->vdev_mg;

		metaslab_group_passivate(mg);
		if (vd->vdev_log_mg != NULL) {
			ASSERT(!vd->vdev_islog);
			metaslab_group_passivate(vd->vdev_log_mg);
		}

		uint64_t count = vd->vdev_ms_count;
		for (uint64_t m = 0; m < count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];
			if (msp != NULL)
				metaslab_fini(msp);
		}
		vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
		vd->vdev_ms_count = 0;

		for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
			ASSERT0(mg->mg_histogram[i]);
			if (vd->vdev_log_mg != NULL)
				ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
		}
	}
	ASSERT0(vd->vdev_ms_count);
	ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_abd,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			abd_free(zio->io_abd);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		abd_free(zio->io_abd);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;
		zio_link_t *zl;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			vdev_dbgmsg(vd, "failed probe");
			(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, NULL, 0);
			zio->io_error = SET_ERROR(ENXIO);
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		zl = NULL;
		while ((pio = zio_walk_parents(zio, &zl)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = SET_ERROR(ENXIO);

		kmem_free(vps, sizeof (*vps));
	}
}
/*
 * Determine whether this device is accessible.
 *
 * Read and write to several known locations: the pad regions of each
 * vdev label but the first, which we leave alone in case it contains
 * a VTOC.
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time. All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE. This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O. That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		/*
		 * We can't change the vdev state in this context, so we
		 * kick off an async task to do it on our behalf.
		 */
		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
		    abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}
static void
vdev_load_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_load_error = vdev_load(vd);
}

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
	if (zvol_is_zvol(vd->vdev_path))
		return (B_TRUE);
#endif

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);

	return (B_FALSE);
}

/*
 * Returns B_TRUE if the passed child should be opened.
 */
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
	(void) vd;
	return (B_TRUE);
}

/*
 * Open the requested child vdevs. If any of the leaf vdevs are using
 * a ZFS volume then do the opens in a single thread. This avoids a
 * deadlock when the current thread is holding the spa_namespace_lock.
 */
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
	int children = vd->vdev_children;

	taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);
	vd->vdev_nonrot = B_TRUE;

	for (int c = 0; c < children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (open_func(cvd) == B_FALSE)
			continue;

		if (tq == NULL || vdev_uses_zvols(vd)) {
			cvd->vdev_open_error = vdev_open(cvd);
		} else {
			VERIFY(taskq_dispatch(tq, vdev_open_child,
			    cvd, TQ_SLEEP) != TASKQID_INVALID);
		}

		vd->vdev_nonrot &= cvd->vdev_nonrot;
	}

	if (tq != NULL) {
		taskq_wait(tq);
		taskq_destroy(tq);
	}
}

/*
 * Open all child vdevs.
 */
void
vdev_open_children(vdev_t *vd)
{
	vdev_open_children_impl(vd, vdev_default_open_children_func);
}

/*
 * Conditionally open a subset of child vdevs.
 */
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
	vdev_open_children_impl(vd, open_func);
}
/*
 * Compute the raidz-deflation ratio. Note, we hard-code
 * in 128k (1 << 17) because it is the "typical" blocksize.
 * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
 * otherwise it would inconsistently account for existing bp's.
 */
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
	if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
		vd->vdev_deflate_ratio = (1 << 17) /
		    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
	}
}

/*
 * Choose the best of two ashifts, preferring one between logical ashift
 * (absolute minimum) and administrator defined maximum, otherwise take
 * the biggest of the two.
 */
uint64_t
vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
{
	if (a > logical && a <= zfs_vdev_max_auto_ashift) {
		if (b <= logical || b > zfs_vdev_max_auto_ashift)
			return (a);
		else
			return (MAX(a, b));
	} else if (b <= logical || b > zfs_vdev_max_auto_ashift)
		return (MAX(a, b));
	return (b);
}
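
/*
 * Worked example (not part of the original source): with logical = 9
 * and zfs_vdev_max_auto_ashift = 14, vdev_best_ashift(9, 12, 16)
 * returns 12. Only 12 falls inside the (logical, max] window, so the
 * in-window candidate wins even though it is the smaller value.
 */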
/*
 * Maximize performance by inflating the configured ashift for top level
 * vdevs to be as close to the physical ashift as possible while maintaining
 * administrator defined limits and ensuring it doesn't go below the
 * logical ashift.
 */
static void
vdev_ashift_optimize(vdev_t *vd)
{
	ASSERT(vd == vd->vdev_top);

	if (vd->vdev_ashift < vd->vdev_physical_ashift &&
	    vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
		vd->vdev_ashift = MIN(
		    MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
		    MAX(zfs_vdev_min_auto_ashift,
		    vd->vdev_physical_ashift));
	} else {
		/*
		 * If the logical and physical ashifts are the same, then
		 * we ensure that the top-level vdev's ashift is not smaller
		 * than our minimum ashift value. For the unusual case
		 * where logical ashift > physical ashift, we can't cap
		 * the calculated ashift based on max ashift as that
		 * would cause failures.
		 * We still check if we need to increase it to match
		 * the min ashift.
		 */
		vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
		    vd->vdev_ashift);
	}
}
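
/*
 * Worked example (not part of the original source): a disk reporting
 * 512-byte logical and 4 KiB physical sectors opens with
 * vdev_ashift = 9 and vdev_physical_ashift = 12. With the default
 * window (min = ASHIFT_MIN, max = 14) the first branch applies and
 * vdev_ashift becomes MIN(MAX(14, 9), MAX(ASHIFT_MIN, 12)) = 12, so
 * the physical sector size is adopted automatically.
 */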
1952 * Prepare a virtual device for access.
1955 vdev_open(vdev_t *vd)
1957 spa_t *spa = vd->vdev_spa;
1958 int error;
1959 uint64_t osize = 0;
1960 uint64_t max_osize = 0;
1961 uint64_t asize, max_asize, psize;
1962 uint64_t logical_ashift = 0;
1963 uint64_t physical_ashift = 0;
1965 ASSERT(vd->vdev_open_thread == curthread ||
1966 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1967 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1968 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1969 vd->vdev_state == VDEV_STATE_OFFLINE);
1971 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1972 vd->vdev_cant_read = B_FALSE;
1973 vd->vdev_cant_write = B_FALSE;
1974 vd->vdev_min_asize = vdev_get_min_asize(vd);
1977 * If this vdev is not removed, check its fault status. If it's
1978 * faulted, bail out of the open.
1980 if (!vd->vdev_removed && vd->vdev_faulted) {
1981 ASSERT(vd->vdev_children == 0);
1982 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1983 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1984 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1985 vd->vdev_label_aux);
1986 return (SET_ERROR(ENXIO));
1987 } else if (vd->vdev_offline) {
1988 ASSERT(vd->vdev_children == 0);
1989 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
1990 return (SET_ERROR(ENXIO));
1993 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
1994 &logical_ashift, &physical_ashift);
1996 /* Keep the device in removed state if unplugged */
1997 if (error == ENOENT && vd->vdev_removed) {
1998 vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED,
1999 VDEV_AUX_NONE);
2000 return (error);
2004 * Physical volume size should never be larger than its max size, unless
2005 * the disk has shrunk while we were reading it or the device is buggy
2006 * or damaged: either way it's not safe for use, bail out of the open.
2008 if (osize > max_osize) {
2009 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2010 VDEV_AUX_OPEN_FAILED);
2011 return (SET_ERROR(ENXIO));
2015 * Reset the vdev_reopening flag so that we actually close
2016 * the vdev on error.
2018 vd->vdev_reopening = B_FALSE;
2019 if (zio_injection_enabled && error == 0)
2020 error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
2022 if (error) {
2023 if (vd->vdev_removed &&
2024 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
2025 vd->vdev_removed = B_FALSE;
2027 if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
2028 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
2029 vd->vdev_stat.vs_aux);
2030 } else {
2031 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2032 vd->vdev_stat.vs_aux);
2034 return (error);
2037 vd->vdev_removed = B_FALSE;
2040 * Recheck the faulted flag now that we have confirmed that
2041 * the vdev is accessible. If we're faulted, bail.
2043 if (vd->vdev_faulted) {
2044 ASSERT(vd->vdev_children == 0);
2045 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
2046 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
2047 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2048 vd->vdev_label_aux);
2049 return (SET_ERROR(ENXIO));
2052 if (vd->vdev_degraded) {
2053 ASSERT(vd->vdev_children == 0);
2054 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
2055 VDEV_AUX_ERR_EXCEEDED);
2056 } else {
2057 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
2061 * For hole or missing vdevs we just return success.
2063 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
2064 return (0);
2066 for (int c = 0; c < vd->vdev_children; c++) {
2067 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
2068 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
2069 VDEV_AUX_NONE);
2070 break;
2074 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
2075 max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
2077 if (vd->vdev_children == 0) {
2078 if (osize < SPA_MINDEVSIZE) {
2079 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2080 VDEV_AUX_TOO_SMALL);
2081 return (SET_ERROR(EOVERFLOW));
2083 psize = osize;
2084 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
2085 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
2086 VDEV_LABEL_END_SIZE);
2087 } else {
2088 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
2089 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
2090 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2091 VDEV_AUX_TOO_SMALL);
2092 return (SET_ERROR(EOVERFLOW));
2094 psize = 0;
2095 asize = osize;
2096 max_asize = max_osize;
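/*
 * Note on the label overhead subtracted above: with the usual on-disk
 * layout, VDEV_LABEL_START_SIZE covers the two front labels plus the
 * boot block region and VDEV_LABEL_END_SIZE covers the two back
 * labels, so a leaf's allocatable size is its open size less a few MB
 * of bookkeeping at each end.
 */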
2100 * If the vdev was expanded, record this so that we can re-create the
2101 * uberblock rings in labels {2,3} during the next sync.
2103 if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
2104 vd->vdev_copy_uberblocks = B_TRUE;
2106 vd->vdev_psize = psize;
2109 * Make sure the allocatable size hasn't shrunk too much.
2111 if (asize < vd->vdev_min_asize) {
2112 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2113 VDEV_AUX_BAD_LABEL);
2114 return (SET_ERROR(EINVAL));
2118 * We can always set the logical/physical ashift members since
2119 * their values are only used to calculate the vdev_ashift when
2120 * the device is first added to the config. These values should
2121 * not be used for anything else since they may change whenever
2122 * the device is reopened and we don't store them in the label.
2124 vd->vdev_physical_ashift =
2125 MAX(physical_ashift, vd->vdev_physical_ashift);
2126 vd->vdev_logical_ashift = MAX(logical_ashift,
2127 vd->vdev_logical_ashift);
2129 if (vd->vdev_asize == 0) {
2131 * This is the first-ever open, so use the computed values.
2132 * For compatibility, a different ashift can be requested.
2134 vd->vdev_asize = asize;
2135 vd->vdev_max_asize = max_asize;
2138 * If the vdev_ashift was not overridden at creation time,
2139 * then set it to the logical ashift and optimize the ashift.
2141 if (vd->vdev_ashift == 0) {
2142 vd->vdev_ashift = vd->vdev_logical_ashift;
2144 if (vd->vdev_logical_ashift > ASHIFT_MAX) {
2145 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2146 VDEV_AUX_ASHIFT_TOO_BIG);
2147 return (SET_ERROR(EDOM));
2150 if (vd->vdev_top == vd) {
2151 vdev_ashift_optimize(vd);
2154 if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
2155 vd->vdev_ashift > ASHIFT_MAX)) {
2156 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2157 VDEV_AUX_BAD_ASHIFT);
2158 return (SET_ERROR(EDOM));
2160 } else {
2162 * Make sure the alignment required hasn't increased.
2164 if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
2165 vd->vdev_ops->vdev_op_leaf) {
2166 (void) zfs_ereport_post(
2167 FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
2168 spa, vd, NULL, NULL, 0);
2169 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2170 VDEV_AUX_BAD_LABEL);
2171 return (SET_ERROR(EDOM));
2173 vd->vdev_max_asize = max_asize;
2177 * If all children are healthy we update asize if either:
2178 * The asize has increased, due to a device expansion caused by dynamic
2179 * LUN growth or vdev replacement, and automatic expansion is enabled,
2180 * making the additional space available.
2182 * The asize has decreased, due to a device shrink usually caused by a
2183 * vdev replace with a smaller device. This ensures that calculations
2184 * based on max_asize and asize, e.g. esize, are always valid. It's safe
2185 * to do this as we've already validated that asize is greater than
2186 * vdev_min_asize.
2188 if (vd->vdev_state == VDEV_STATE_HEALTHY &&
2189 ((asize > vd->vdev_asize &&
2190 (vd->vdev_expanding || spa->spa_autoexpand)) ||
2191 (asize < vd->vdev_asize)))
2192 vd->vdev_asize = asize;
2194 vdev_set_min_asize(vd);
2197 * Ensure we can issue some IO before declaring the
2198 * vdev open for business.
2200 if (vd->vdev_ops->vdev_op_leaf &&
2201 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
2202 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
2203 VDEV_AUX_ERR_EXCEEDED);
2204 return (error);
2208 * Track the minimum allocation size.
2210 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
2211 vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
2212 uint64_t min_alloc = vdev_get_min_alloc(vd);
2213 if (min_alloc < spa->spa_min_alloc)
2214 spa->spa_min_alloc = min_alloc;
2218 * If this is a leaf vdev, assess whether a resilver is needed.
2219 * But don't do this if we are doing a reopen for a scrub, since
2220 * this would just restart the scrub we are already doing.
2222 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
2223 dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
2225 return (0);
2228 static void
2229 vdev_validate_child(void *arg)
2231 vdev_t *vd = arg;
2233 vd->vdev_validate_thread = curthread;
2234 vd->vdev_validate_error = vdev_validate(vd);
2235 vd->vdev_validate_thread = NULL;
2239 * Called once the vdevs are all opened, this routine validates the label
2240 * contents. This needs to be done before vdev_load() so that we don't
2241 * inadvertently do repair I/Os to the wrong device.
2243 * This function will only return failure if one of the vdevs indicates that it
2244 * has since been destroyed or exported. This is only possible if
2245 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
2246 * will be updated but the function will return 0.
2249 vdev_validate(vdev_t *vd)
2251 spa_t *spa = vd->vdev_spa;
2252 taskq_t *tq = NULL;
2253 nvlist_t *label;
2254 uint64_t guid = 0, aux_guid = 0, top_guid;
2255 uint64_t state;
2256 nvlist_t *nvl;
2257 uint64_t txg;
2258 int children = vd->vdev_children;
2260 if (vdev_validate_skip)
2261 return (0);
2263 if (children > 0) {
2264 tq = taskq_create("vdev_validate", children, minclsyspri,
2265 children, children, TASKQ_PREPOPULATE);
2268 for (uint64_t c = 0; c < children; c++) {
2269 vdev_t *cvd = vd->vdev_child[c];
2271 if (tq == NULL || vdev_uses_zvols(cvd)) {
2272 vdev_validate_child(cvd);
2273 } else {
2274 VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
2275 TQ_SLEEP) != TASKQID_INVALID);
2278 if (tq != NULL) {
2279 taskq_wait(tq);
2280 taskq_destroy(tq);
2282 for (int c = 0; c < children; c++) {
2283 int error = vd->vdev_child[c]->vdev_validate_error;
2285 if (error != 0)
2286 return (SET_ERROR(EBADF));
2291 * If the device has already failed, or was marked offline, don't do
2292 * any further validation. Otherwise, label I/O will fail and we will
2293 * overwrite the previous state.
2295 if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
2296 return (0);
2299 * If we are performing an extreme rewind, we allow for a label that
2300 * was modified at a point after the current txg.
2301 * If the config lock is not held, do not check the txg. spa_sync could
2302 * be updating the vdev's label before updating spa_last_synced_txg.
2304 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
2305 spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
2306 txg = UINT64_MAX;
2307 else
2308 txg = spa_last_synced_txg(spa);
2310 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
2311 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2312 VDEV_AUX_BAD_LABEL);
2313 vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
2314 "txg %llu", (u_longlong_t)txg);
2315 return (0);
2319 * Determine if this vdev has been split off into another
2320 * pool. If so, then refuse to open it.
2322 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
2323 &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
2324 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2325 VDEV_AUX_SPLIT_POOL);
2326 nvlist_free(label);
2327 vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
2328 return (0);
2331 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
2332 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2333 VDEV_AUX_CORRUPT_DATA);
2334 nvlist_free(label);
2335 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2336 ZPOOL_CONFIG_POOL_GUID);
2337 return (0);
2341 * If config is not trusted then ignore the spa guid check. This is
2342 * necessary because if the machine crashed during a re-guid the new
2343 * guid might have been written to all of the vdev labels, but not the
2344 * cached config. The check will be performed again once we have the
2345 * trusted config from the MOS.
2347 if (spa->spa_trust_config && guid != spa_guid(spa)) {
2348 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2349 VDEV_AUX_CORRUPT_DATA);
2350 nvlist_free(label);
2351 vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
2352 "match config (%llu != %llu)", (u_longlong_t)guid,
2353 (u_longlong_t)spa_guid(spa));
2354 return (0);
2357 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
2358 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
2359 &aux_guid) != 0)
2360 aux_guid = 0;
2362 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
2363 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2364 VDEV_AUX_CORRUPT_DATA);
2365 nvlist_free(label);
2366 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2367 ZPOOL_CONFIG_GUID);
2368 return (0);
2371 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
2372 != 0) {
2373 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2374 VDEV_AUX_CORRUPT_DATA);
2375 nvlist_free(label);
2376 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2377 ZPOOL_CONFIG_TOP_GUID);
2378 return (0);
2382 * If this vdev just became a top-level vdev because its sibling was
2383 * detached, it will have adopted the parent's vdev guid -- but the
2384 * label may or may not be on disk yet. Fortunately, either version
2385 * of the label will have the same top guid, so if we're a top-level
2386 * vdev, we can safely compare to that instead.
2387 * However, if the config comes from a cachefile that failed to update
2388 * after the detach, a top-level vdev will appear as a non top-level
2389 * vdev in the config. Also relax the constraints if we perform an
2390 * extreme rewind.
2392 * If we split this vdev off instead, then we also check the
2393 * original pool's guid. We don't want to consider the vdev
2394 * corrupt if it is partway through a split operation.
2396 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
2397 boolean_t mismatch = B_FALSE;
2398 if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
2399 if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
2400 mismatch = B_TRUE;
2401 } else {
2402 if (vd->vdev_guid != top_guid &&
2403 vd->vdev_top->vdev_guid != guid)
2404 mismatch = B_TRUE;
2407 if (mismatch) {
2408 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2409 VDEV_AUX_CORRUPT_DATA);
2410 nvlist_free(label);
2411 vdev_dbgmsg(vd, "vdev_validate: config guid "
2412 "doesn't match label guid");
2413 vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
2414 (u_longlong_t)vd->vdev_guid,
2415 (u_longlong_t)vd->vdev_top->vdev_guid);
2416 vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
2417 "aux_guid %llu", (u_longlong_t)guid,
2418 (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
2419 return (0);
2423 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
2424 &state) != 0) {
2425 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2426 VDEV_AUX_CORRUPT_DATA);
2427 nvlist_free(label);
2428 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
2429 ZPOOL_CONFIG_POOL_STATE);
2430 return (0);
2433 nvlist_free(label);
2436 * If this is a verbatim import, no need to check the
2437 * state of the pool.
2439 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
2440 spa_load_state(spa) == SPA_LOAD_OPEN &&
2441 state != POOL_STATE_ACTIVE) {
2442 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
2443 "for spa %s", (u_longlong_t)state, spa->spa_name);
2444 return (SET_ERROR(EBADF));
2448 * If we were able to open and validate a vdev that was
2449 * previously marked permanently unavailable, clear that state
2450 * now.
2452 if (vd->vdev_not_present)
2453 vd->vdev_not_present = 0;
2455 return (0);
2458 static void
2459 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
2461 char *old, *new;
2462 if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
2463 if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
2464 zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
2465 "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
2466 dvd->vdev_path, svd->vdev_path);
2467 spa_strfree(dvd->vdev_path);
2468 dvd->vdev_path = spa_strdup(svd->vdev_path);
2470 } else if (svd->vdev_path != NULL) {
2471 dvd->vdev_path = spa_strdup(svd->vdev_path);
2472 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
2473 (u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
2477 * Our enclosure sysfs path may have changed between imports
2479 old = dvd->vdev_enc_sysfs_path;
2480 new = svd->vdev_enc_sysfs_path;
2481 if ((old != NULL && new == NULL) ||
2482 (old == NULL && new != NULL) ||
2483 ((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
2484 zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
2485 "changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
2486 old, new);
2488 if (dvd->vdev_enc_sysfs_path)
2489 spa_strfree(dvd->vdev_enc_sysfs_path);
2491 if (svd->vdev_enc_sysfs_path) {
2492 dvd->vdev_enc_sysfs_path = spa_strdup(
2493 svd->vdev_enc_sysfs_path);
2494 } else {
2495 dvd->vdev_enc_sysfs_path = NULL;
2501 * Recursively copy vdev paths from one vdev to another. Source and destination
2502 * vdev trees must have the same geometry, otherwise an error is returned.
2503 * Intended to copy paths from userland config into MOS config.
2503 * paths from userland config into MOS config.
2506 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
2508 if ((svd->vdev_ops == &vdev_missing_ops) ||
2509 (svd->vdev_ishole && dvd->vdev_ishole) ||
2510 (dvd->vdev_ops == &vdev_indirect_ops))
2511 return (0);
2513 if (svd->vdev_ops != dvd->vdev_ops) {
2514 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
2515 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
2516 return (SET_ERROR(EINVAL));
2519 if (svd->vdev_guid != dvd->vdev_guid) {
2520 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
2521 "%llu)", (u_longlong_t)svd->vdev_guid,
2522 (u_longlong_t)dvd->vdev_guid);
2523 return (SET_ERROR(EINVAL));
2526 if (svd->vdev_children != dvd->vdev_children) {
2527 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
2528 "%llu != %llu", (u_longlong_t)svd->vdev_children,
2529 (u_longlong_t)dvd->vdev_children);
2530 return (SET_ERROR(EINVAL));
2533 for (uint64_t i = 0; i < svd->vdev_children; i++) {
2534 int error = vdev_copy_path_strict(svd->vdev_child[i],
2535 dvd->vdev_child[i]);
2536 if (error != 0)
2537 return (error);
2540 if (svd->vdev_ops->vdev_op_leaf)
2541 vdev_copy_path_impl(svd, dvd);
2543 return (0);
2546 static void
2547 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
2549 ASSERT(stvd->vdev_top == stvd);
2550 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
2552 for (uint64_t i = 0; i < dvd->vdev_children; i++) {
2553 vdev_copy_path_search(stvd, dvd->vdev_child[i]);
2556 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
2557 return;
2560 * The idea here is that while a vdev can shift positions within
2561 * a top vdev (when replacing, attaching a mirror, etc.), it cannot
2562 * step outside of it.
2564 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
2566 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
2567 return;
2569 ASSERT(vd->vdev_ops->vdev_op_leaf);
2571 vdev_copy_path_impl(vd, dvd);
2575 * Recursively copy vdev paths from one root vdev to another. Source and
2576 * destination vdev trees may differ in geometry. For each destination leaf
2577 * vdev, search a vdev with the same guid and top vdev id in the source.
2578 * Intended to copy paths from userland config into MOS config.
2580 void
2581 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
2583 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
2584 ASSERT(srvd->vdev_ops == &vdev_root_ops);
2585 ASSERT(drvd->vdev_ops == &vdev_root_ops);
2587 for (uint64_t i = 0; i < children; i++) {
2588 vdev_copy_path_search(srvd->vdev_child[i],
2589 drvd->vdev_child[i]);
2594 * Close a virtual device.
2596 void
2597 vdev_close(vdev_t *vd)
2599 vdev_t *pvd = vd->vdev_parent;
2600 spa_t *spa __maybe_unused = vd->vdev_spa;
2602 ASSERT(vd != NULL);
2603 ASSERT(vd->vdev_open_thread == curthread ||
2604 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2607 * If our parent is reopening, then we are as well, unless we are
2608 * going offline.
2610 if (pvd != NULL && pvd->vdev_reopening)
2611 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
2613 vd->vdev_ops->vdev_op_close(vd);
2615 vdev_cache_purge(vd);
2618 * We record the previous state before we close it, so that if we are
2619 * doing a reopen(), we don't generate FMA ereports if we notice that
2620 * it's still faulted.
2622 vd->vdev_prevstate = vd->vdev_state;
2624 if (vd->vdev_offline)
2625 vd->vdev_state = VDEV_STATE_OFFLINE;
2626 else
2627 vd->vdev_state = VDEV_STATE_CLOSED;
2628 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2631 void
2632 vdev_hold(vdev_t *vd)
2634 spa_t *spa = vd->vdev_spa;
2636 ASSERT(spa_is_root(spa));
2637 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
2638 return;
2640 for (int c = 0; c < vd->vdev_children; c++)
2641 vdev_hold(vd->vdev_child[c]);
2643 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
2644 vd->vdev_ops->vdev_op_hold(vd);
2647 void
2648 vdev_rele(vdev_t *vd)
2650 ASSERT(spa_is_root(vd->vdev_spa));
2651 for (int c = 0; c < vd->vdev_children; c++)
2652 vdev_rele(vd->vdev_child[c]);
2654 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
2655 vd->vdev_ops->vdev_op_rele(vd);
2659 * Reopen all interior vdevs and any unopened leaves. We don't actually
2660 * reopen leaf vdevs which had previously been opened as they might deadlock
2661 * on the spa_config_lock. Instead we only obtain the leaf's physical size.
2662 * If the leaf has never been opened then open it, as usual.
2664 void
2665 vdev_reopen(vdev_t *vd)
2667 spa_t *spa = vd->vdev_spa;
2669 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2671 /* set the reopening flag unless we're taking the vdev offline */
2672 vd->vdev_reopening = !vd->vdev_offline;
2673 vdev_close(vd);
2674 (void) vdev_open(vd);
2677 * Call vdev_validate() here to make sure we have the same device.
2678 * Otherwise, a device with an invalid label could be successfully
2679 * opened in response to vdev_reopen().
2681 if (vd->vdev_aux) {
2682 (void) vdev_validate_aux(vd);
2683 if (vdev_readable(vd) && vdev_writeable(vd) &&
2684 vd->vdev_aux == &spa->spa_l2cache) {
2686 * In case the vdev is present we should evict all ARC
2687 * buffers and pointers to log blocks and reclaim their
2688 * space before restoring its contents to L2ARC.
2690 if (l2arc_vdev_present(vd)) {
2691 l2arc_rebuild_vdev(vd, B_TRUE);
2692 } else {
2693 l2arc_add_vdev(spa, vd);
2695 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
2696 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
2698 } else {
2699 (void) vdev_validate(vd);
2703 * Reassess parent vdev's health.
2705 vdev_propagate_state(vd);
2709 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
2711 int error;
2714 * Normally, partial opens (e.g. of a mirror) are allowed.
2715 * For a create, however, we want to fail the request if
2716 * there are any components we can't open.
2718 error = vdev_open(vd);
2720 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
2721 vdev_close(vd);
2722 return (error ? error : SET_ERROR(ENXIO));
2726 * Recursively load DTLs and initialize all labels.
2728 if ((error = vdev_dtl_load(vd)) != 0 ||
2729 (error = vdev_label_init(vd, txg, isreplacing ?
2730 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
2731 vdev_close(vd);
2732 return (error);
2735 return (0);
2738 void
2739 vdev_metaslab_set_size(vdev_t *vd)
2741 uint64_t asize = vd->vdev_asize;
2742 uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
2743 uint64_t ms_shift;
2746 * There are two dimensions to the metaslab sizing calculation:
2747 * the size of the metaslab and the count of metaslabs per vdev.
2749 * The default values used below are a good balance between memory
2750 * usage (larger metaslab size means more memory needed for loaded
2751 * metaslabs; more metaslabs means more memory needed for the
2752 * metaslab_t structs), metaslab load time (larger metaslabs take
2753 * longer to load), and metaslab sync time (more metaslabs means
2754 * more time spent syncing all of them).
2756 * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
2757 * The ranges of the dimensions are as follows:
2759 * 2^29 <= ms_size <= 2^34
2760 * 16 <= ms_count <= 131,072
2762 * On the lower end of vdev sizes, we aim for metaslabs sizes of
2763 * at least 512MB (2^29) to minimize fragmentation effects when
2764 * testing with smaller devices. However, the count constraint
2765 * of at least 16 metaslabs will override this minimum size goal.
2767 * On the upper end of vdev sizes, we aim for a maximum metaslab
2768 * size of 16GB. However, we will cap the total count to 2^17
2769 * metaslabs to keep our memory footprint in check and let the
2770 * metaslab size grow from there if that limit is hit.
2772 * The net effect of applying the above constraints is summarized below.
2774 *   vdev size       metaslab count
2775 *  ---------------|-----------------
2776 *       < 8GB       ~16
2777 *   8GB   - 100GB    one per 512MB
2778 *   100GB - 3TB      ~200
2779 *   3TB   - 2PB      one per 16GB
2780 *       > 2PB       ~131,072
2781 *  ---------------------------------
2783 * Finally, note that all of the above calculate the initial
2784 * number of metaslabs. Expanding a top-level vdev will result
2785 * in additional metaslabs being allocated, making it possible
2786 * to exceed the zfs_vdev_ms_count_limit.
2789 if (ms_count < zfs_vdev_min_ms_count)
2790 ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
2791 else if (ms_count > zfs_vdev_default_ms_count)
2792 ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
2793 else
2794 ms_shift = zfs_vdev_default_ms_shift;
2796 if (ms_shift < SPA_MAXBLOCKSHIFT) {
2797 ms_shift = SPA_MAXBLOCKSHIFT;
2798 } else if (ms_shift > zfs_vdev_max_ms_shift) {
2799 ms_shift = zfs_vdev_max_ms_shift;
2800 /* cap the total count to constrain memory footprint */
2801 if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
2802 ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
2805 vd->vdev_ms_shift = ms_shift;
2806 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
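/*
 * For illustration: with the defaults described above, a 1 TiB vdev
 * computes ms_count = 2^40 >> 29 = 2048, which exceeds the target of
 * 200, so ms_shift = highbit64(2^40 / 200) = 33. Rounding the shift up
 * this way yields 8 GiB metaslabs, 2^40 / 2^33 = 128 of them.
 */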
2809 void
2810 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2812 ASSERT(vd == vd->vdev_top);
2813 /* indirect vdevs don't have metaslabs or dtls */
2814 ASSERT(vdev_is_concrete(vd) || flags == 0);
2815 ASSERT(ISP2(flags));
2816 ASSERT(spa_writeable(vd->vdev_spa));
2818 if (flags & VDD_METASLAB)
2819 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2821 if (flags & VDD_DTL)
2822 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2824 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2827 void
2828 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2830 for (int c = 0; c < vd->vdev_children; c++)
2831 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2833 if (vd->vdev_ops->vdev_op_leaf)
2834 vdev_dirty(vd->vdev_top, flags, vd, txg);
2838 * DTLs.
2840 * A vdev's DTL (dirty time log) is the set of transaction groups for which
2841 * the vdev has less than perfect replication. There are four kinds of DTL:
2843 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2845 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2847 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2848 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2849 * txgs that was scrubbed.
2851 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2852 * persistent errors or just some device being offline.
2853 * Unlike the other three, the DTL_OUTAGE map is not generally
2854 * maintained; it's only computed when needed, typically to
2855 * determine whether a device can be detached.
2857 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2858 * either has the data or it doesn't.
2860 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2861 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2862 * if any child is less than fully replicated, then so is its parent.
2863 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2864 * comprising only those txgs which appear in 'maxfaults' or more children;
2865 * those are the txgs we don't have enough replication to read. For example,
2866 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2867 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2868 * two child DTL_MISSING maps.
2870 * It should be clear from the above that to compute the DTLs and outage maps
2871 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2872 * Therefore, that is all we keep on disk. When loading the pool, or after
2873 * a configuration change, we generate all other DTLs from first principles.
2875 void
2876 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2878 range_tree_t *rt = vd->vdev_dtl[t];
2880 ASSERT(t < DTL_TYPES);
2881 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2882 ASSERT(spa_writeable(vd->vdev_spa));
2884 mutex_enter(&vd->vdev_dtl_lock);
2885 if (!range_tree_contains(rt, txg, size))
2886 range_tree_add(rt, txg, size);
2887 mutex_exit(&vd->vdev_dtl_lock);
2890 boolean_t
2891 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2893 range_tree_t *rt = vd->vdev_dtl[t];
2894 boolean_t dirty = B_FALSE;
2896 ASSERT(t < DTL_TYPES);
2897 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2900 * While we are loading the pool, the DTLs have not been loaded yet.
2901 * This isn't a problem but it can result in devices being tried
2902 * which are known to not have the data, in which case the import
2903 * relies on the checksum to ensure that we get the right data.
2904 * Note that while importing we are only reading the MOS, which is
2905 * always checksummed.
2907 mutex_enter(&vd->vdev_dtl_lock);
2908 if (!range_tree_is_empty(rt))
2909 dirty = range_tree_contains(rt, txg, size);
2910 mutex_exit(&vd->vdev_dtl_lock);
2912 return (dirty);
2915 boolean_t
2916 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2918 range_tree_t *rt = vd->vdev_dtl[t];
2919 boolean_t empty;
2921 mutex_enter(&vd->vdev_dtl_lock);
2922 empty = range_tree_is_empty(rt);
2923 mutex_exit(&vd->vdev_dtl_lock);
2925 return (empty);
2929 * Check if the txg falls within the range which must be
2930 * resilvered. DVAs outside this range can always be skipped.
2932 boolean_t
2933 vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
2934 uint64_t phys_birth)
2936 (void) dva, (void) psize;
2938 /* Set by sequential resilver. */
2939 if (phys_birth == TXG_UNKNOWN)
2940 return (B_TRUE);
2942 return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
2946 * Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
2948 boolean_t
2949 vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
2950 uint64_t phys_birth)
2952 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2954 if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
2955 vd->vdev_ops->vdev_op_leaf)
2956 return (B_TRUE);
2958 return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
2959 phys_birth));
2963 * Returns the lowest txg in the DTL range.
2965 static uint64_t
2966 vdev_dtl_min(vdev_t *vd)
2968 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2969 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2970 ASSERT0(vd->vdev_children);
2972 return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
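/*
 * The -1 above pairs with the half-open (min_txg, max_txg] convention
 * used by the scan code (see vdev_dtl_should_excise()): returning one
 * txg below the first missing txg keeps that txg inside the range.
 */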
2976 * Returns the highest txg in the DTL.
2978 static uint64_t
2979 vdev_dtl_max(vdev_t *vd)
2981 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2982 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2983 ASSERT0(vd->vdev_children);
2985 return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
2989 * Determine if a resilvering vdev should remove any DTL entries from
2990 * its range. If the vdev was resilvering for the entire duration of the
2991 * scan then it should excise that range from its DTLs. Otherwise, this
2992 * vdev is considered partially resilvered and should leave its DTL
2993 * entries intact. The comment in vdev_dtl_reassess() describes how we
2994 * excise the DTLs.
2996 static boolean_t
2997 vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
2999 ASSERT0(vd->vdev_children);
3001 if (vd->vdev_state < VDEV_STATE_DEGRADED)
3002 return (B_FALSE);
3004 if (vd->vdev_resilver_deferred)
3005 return (B_FALSE);
3007 if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
3008 return (B_TRUE);
3010 if (rebuild_done) {
3011 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
3012 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
3014 /* Rebuild not initiated by attach */
3015 if (vd->vdev_rebuild_txg == 0)
3016 return (B_TRUE);
3019 * When a rebuild completes without error then all missing data
3020 * up to the rebuild max txg has been reconstructed and the DTL
3021 * is eligible for excision.
3023 if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
3024 vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
3025 ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
3026 ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
3027 ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
3028 return (B_TRUE);
3030 } else {
3031 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
3032 dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
3034 /* Resilver not initiated by attach */
3035 if (vd->vdev_resilver_txg == 0)
3036 return (B_TRUE);
3039 * When a resilver is initiated the scan will assign the
3040 * scn_max_txg value to the highest txg value that exists
3041 * in all DTLs. If this device's max DTL is not part of this
3042 * scan (i.e. it is not in the range (scn_min_txg, scn_max_txg])
3043 * then it is not eligible for excision.
3045 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
3046 ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
3047 ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
3048 ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
3049 return (B_TRUE);
3053 return (B_FALSE);
3057 * Reassess DTLs after a config change or scrub completion. If txg == 0, no
3058 * write operations will be issued to the pool.
3060 void
3061 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
3062 boolean_t scrub_done, boolean_t rebuild_done)
3064 spa_t *spa = vd->vdev_spa;
3065 avl_tree_t reftree;
3066 int minref;
3068 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3070 for (int c = 0; c < vd->vdev_children; c++)
3071 vdev_dtl_reassess(vd->vdev_child[c], txg,
3072 scrub_txg, scrub_done, rebuild_done);
3074 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
3075 return;
3077 if (vd->vdev_ops->vdev_op_leaf) {
3078 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
3079 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
3080 boolean_t check_excise = B_FALSE;
3081 boolean_t wasempty = B_TRUE;
3083 mutex_enter(&vd->vdev_dtl_lock);
3086 * If requested, pretend the scan or rebuild completed cleanly.
3088 if (zfs_scan_ignore_errors) {
3089 if (scn != NULL)
3090 scn->scn_phys.scn_errors = 0;
3091 if (vr != NULL)
3092 vr->vr_rebuild_phys.vrp_errors = 0;
3095 if (scrub_txg != 0 &&
3096 !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
3097 wasempty = B_FALSE;
3098 zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
3099 "dtl:%llu/%llu errors:%llu",
3100 (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
3101 (u_longlong_t)scrub_txg, spa->spa_scrub_started,
3102 (u_longlong_t)vdev_dtl_min(vd),
3103 (u_longlong_t)vdev_dtl_max(vd),
3104 (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
3108 * If we've completed a scrub/resilver or a rebuild cleanly
3109 * then determine if this vdev should remove any DTLs. We
3110 * only want to excise regions on vdevs that were available
3111 * during the entire duration of this scan.
3113 if (rebuild_done &&
3114 vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
3115 check_excise = B_TRUE;
3116 } else {
3117 if (spa->spa_scrub_started ||
3118 (scn != NULL && scn->scn_phys.scn_errors == 0)) {
3119 check_excise = B_TRUE;
3123 if (scrub_txg && check_excise &&
3124 vdev_dtl_should_excise(vd, rebuild_done)) {
3126 * We completed a scrub, resilver or rebuild up to
3127 * scrub_txg. If we did it without rebooting, then
3128 * the scrub dtl will be valid, so excise the old
3129 * region and fold in the scrub dtl. Otherwise,
3130 * leave the dtl as-is if there was an error.
3132 * There's a little trick here: to excise the beginning
3133 * of the DTL_MISSING map, we put it into a reference
3134 * tree and then add a segment with refcnt -1 that
3135 * covers the range [0, scrub_txg). This means
3136 * that each txg in that range has refcnt -1 or 0.
3137 * We then add DTL_SCRUB with a refcnt of 2, so that
3138 * entries in the range [0, scrub_txg) will have a
3139 * positive refcnt -- either 1 or 2. We then convert
3140 * the reference tree into the new DTL_MISSING map.
3142 space_reftree_create(&reftree);
3143 space_reftree_add_map(&reftree,
3144 vd->vdev_dtl[DTL_MISSING], 1);
3145 space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
3146 space_reftree_add_map(&reftree,
3147 vd->vdev_dtl[DTL_SCRUB], 2);
3148 space_reftree_generate_map(&reftree,
3149 vd->vdev_dtl[DTL_MISSING], 1);
3150 space_reftree_destroy(&reftree);
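/*
 * Worked example of the excision above: suppose DTL_MISSING is
 * [5, 20), scrub_txg is 15 and DTL_SCRUB is [8, 10). Txgs 5-7 and
 * 10-14 end up with refcnt 0 and are excised, txgs 8-9 get
 * 1 - 1 + 2 = 2, and txgs 15-19 keep refcnt 1, so the regenerated
 * DTL_MISSING is [8, 10) plus [15, 20).
 */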
3152 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
3153 zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
3154 (u_longlong_t)vdev_dtl_min(vd),
3155 (u_longlong_t)vdev_dtl_max(vd));
3156 } else if (!wasempty) {
3157 zfs_dbgmsg("DTL_MISSING is now empty");
3160 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
3161 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
3162 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
3163 if (scrub_done)
3164 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
3165 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
3166 if (!vdev_readable(vd))
3167 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
3168 else
3169 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
3170 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
3173 * If the vdev was resilvering or rebuilding and no longer
3174 * has any DTLs then reset the appropriate flag and dirty
3175 * the top level so that we persist the change.
3177 if (txg != 0 &&
3178 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
3179 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
3180 if (vd->vdev_rebuild_txg != 0) {
3181 vd->vdev_rebuild_txg = 0;
3182 vdev_config_dirty(vd->vdev_top);
3183 } else if (vd->vdev_resilver_txg != 0) {
3184 vd->vdev_resilver_txg = 0;
3185 vdev_config_dirty(vd->vdev_top);
3189 mutex_exit(&vd->vdev_dtl_lock);
3191 if (txg != 0)
3192 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
3193 return;
3196 mutex_enter(&vd->vdev_dtl_lock);
3197 for (int t = 0; t < DTL_TYPES; t++) {
3198 /* account for child's outage in parent's missing map */
3199 int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
3200 if (t == DTL_SCRUB)
3201 continue; /* leaf vdevs only */
3202 if (t == DTL_PARTIAL)
3203 minref = 1; /* i.e. non-zero */
3204 else if (vdev_get_nparity(vd) != 0)
3205 minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
3206 else
3207 minref = vd->vdev_children; /* any kind of mirror */
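/*
 * For example, a 3-way mirror gets minref = 3: a txg lands in the
 * parent's DTL_MISSING only if all three children miss it. A raidz2
 * vdev gets minref = nparity + 1 = 3: data is lost only once more
 * devices than parity are missing.
 */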
3208 space_reftree_create(&reftree);
3209 for (int c = 0; c < vd->vdev_children; c++) {
3210 vdev_t *cvd = vd->vdev_child[c];
3211 mutex_enter(&cvd->vdev_dtl_lock);
3212 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
3213 mutex_exit(&cvd->vdev_dtl_lock);
3215 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
3216 space_reftree_destroy(&reftree);
3218 mutex_exit(&vd->vdev_dtl_lock);
3222 * Iterate over all the vdevs except spares, and post kobj events
3224 void
3225 vdev_post_kobj_evt(vdev_t *vd)
3227 if (vd->vdev_ops->vdev_op_kobj_evt_post &&
3228 vd->vdev_kobj_flag == B_FALSE) {
3229 vd->vdev_kobj_flag = B_TRUE;
3230 vd->vdev_ops->vdev_op_kobj_evt_post(vd);
3233 for (int c = 0; c < vd->vdev_children; c++)
3234 vdev_post_kobj_evt(vd->vdev_child[c]);
3238 * Iterate over all the vdevs except spares, and clear kobj events
3240 void
3241 vdev_clear_kobj_evt(vdev_t *vd)
3243 vd->vdev_kobj_flag = B_FALSE;
3245 for (int c = 0; c < vd->vdev_children; c++)
3246 vdev_clear_kobj_evt(vd->vdev_child[c]);
3250 vdev_dtl_load(vdev_t *vd)
3252 spa_t *spa = vd->vdev_spa;
3253 objset_t *mos = spa->spa_meta_objset;
3254 range_tree_t *rt;
3255 int error = 0;
3257 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
3258 ASSERT(vdev_is_concrete(vd));
3261 * If the dtl cannot be sync'd there is no need to open it.
3263 if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
3264 return (0);
3266 error = space_map_open(&vd->vdev_dtl_sm, mos,
3267 vd->vdev_dtl_object, 0, -1ULL, 0);
3268 if (error)
3269 return (error);
3270 ASSERT(vd->vdev_dtl_sm != NULL);
3272 rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3273 error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
3274 if (error == 0) {
3275 mutex_enter(&vd->vdev_dtl_lock);
3276 range_tree_walk(rt, range_tree_add,
3277 vd->vdev_dtl[DTL_MISSING]);
3278 mutex_exit(&vd->vdev_dtl_lock);
3281 range_tree_vacate(rt, NULL, NULL);
3282 range_tree_destroy(rt);
3284 return (error);
3287 for (int c = 0; c < vd->vdev_children; c++) {
3288 error = vdev_dtl_load(vd->vdev_child[c]);
3289 if (error != 0)
3290 break;
3293 return (error);
3296 static void
3297 vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
3299 spa_t *spa = vd->vdev_spa;
3300 objset_t *mos = spa->spa_meta_objset;
3301 vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
3302 const char *string;
3304 ASSERT(alloc_bias != VDEV_BIAS_NONE);
3306 string =
3307 (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
3308 (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
3309 (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
3311 ASSERT(string != NULL);
3312 VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
3313 1, strlen(string) + 1, string, tx));
3315 if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
3316 spa_activate_allocation_classes(spa, tx);
3320 void
3321 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
3323 spa_t *spa = vd->vdev_spa;
3325 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
3326 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3327 zapobj, tx));
3330 uint64_t
3331 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
3333 spa_t *spa = vd->vdev_spa;
3334 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
3335 DMU_OT_NONE, 0, tx);
3337 ASSERT(zap != 0);
3338 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3339 zap, tx));
3341 return (zap);
3344 void
3345 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
3347 if (vd->vdev_ops != &vdev_hole_ops &&
3348 vd->vdev_ops != &vdev_missing_ops &&
3349 vd->vdev_ops != &vdev_root_ops &&
3350 !vd->vdev_top->vdev_removing) {
3351 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
3352 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
3354 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
3355 vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
3356 if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
3357 vdev_zap_allocation_data(vd, tx);
3360 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 &&
3361 spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
3362 if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2))
3363 spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx);
3364 vd->vdev_root_zap = vdev_create_link_zap(vd, tx);
3367 for (uint64_t i = 0; i < vd->vdev_children; i++) {
3368 vdev_construct_zaps(vd->vdev_child[i], tx);
3372 static void
3373 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
3375 spa_t *spa = vd->vdev_spa;
3376 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
3377 objset_t *mos = spa->spa_meta_objset;
3378 range_tree_t *rtsync;
3379 dmu_tx_t *tx;
3380 uint64_t object = space_map_object(vd->vdev_dtl_sm);
3382 ASSERT(vdev_is_concrete(vd));
3383 ASSERT(vd->vdev_ops->vdev_op_leaf);
3385 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3387 if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
3388 mutex_enter(&vd->vdev_dtl_lock);
3389 space_map_free(vd->vdev_dtl_sm, tx);
3390 space_map_close(vd->vdev_dtl_sm);
3391 vd->vdev_dtl_sm = NULL;
3392 mutex_exit(&vd->vdev_dtl_lock);
3395 * We only destroy the leaf ZAP for detached leaves or for
3396 * removed log devices. Removed data devices handle leaf ZAP
3397 * cleanup later, once cancellation is no longer possible.
3399 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
3400 vd->vdev_top->vdev_islog)) {
3401 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
3402 vd->vdev_leaf_zap = 0;
3405 dmu_tx_commit(tx);
3406 return;
3409 if (vd->vdev_dtl_sm == NULL) {
3410 uint64_t new_object;
3412 new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
3413 VERIFY3U(new_object, !=, 0);
3415 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
3416 0, -1ULL, 0));
3417 ASSERT(vd->vdev_dtl_sm != NULL);
3420 rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
3422 mutex_enter(&vd->vdev_dtl_lock);
3423 range_tree_walk(rt, range_tree_add, rtsync);
3424 mutex_exit(&vd->vdev_dtl_lock);
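/*
 * The copy above snapshots the DTL while holding vdev_dtl_lock, so the
 * space map truncate and write below can perform I/O without holding
 * the lock.
 */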
3426 space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
3427 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
3428 range_tree_vacate(rtsync, NULL, NULL);
3430 range_tree_destroy(rtsync);
3433 * If the object for the space map has changed then dirty
3434 * the top level so that we update the config.
3436 if (object != space_map_object(vd->vdev_dtl_sm)) {
3437 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
3438 "new object %llu", (u_longlong_t)txg, spa_name(spa),
3439 (u_longlong_t)object,
3440 (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
3441 vdev_config_dirty(vd->vdev_top);
3444 dmu_tx_commit(tx);
3448 * Determine whether the specified vdev can be offlined/detached/removed
3449 * without losing data.
3451 boolean_t
3452 vdev_dtl_required(vdev_t *vd)
3454 spa_t *spa = vd->vdev_spa;
3455 vdev_t *tvd = vd->vdev_top;
3456 uint8_t cant_read = vd->vdev_cant_read;
3457 boolean_t required;
3459 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3461 if (vd == spa->spa_root_vdev || vd == tvd)
3462 return (B_TRUE);
3465 * Temporarily mark the device as unreadable, and then determine
3466 * whether this results in any DTL outages in the top-level vdev.
3467 * If not, we can safely offline/detach/remove the device.
3469 vd->vdev_cant_read = B_TRUE;
3470 vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
3471 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
3472 vd->vdev_cant_read = cant_read;
3473 vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
3475 if (!required && zio_injection_enabled) {
3476 required = !!zio_handle_device_injection(vd, NULL,
3477 SET_ERROR(ECHILD));
3480 return (required);
3484 * Determine if resilver is needed, and if so the txg range.
3486 boolean_t
3487 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
3489 boolean_t needed = B_FALSE;
3490 uint64_t thismin = UINT64_MAX;
3491 uint64_t thismax = 0;
3493 if (vd->vdev_children == 0) {
3494 mutex_enter(&vd->vdev_dtl_lock);
3495 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
3496 vdev_writeable(vd)) {
3498 thismin = vdev_dtl_min(vd);
3499 thismax = vdev_dtl_max(vd);
3500 needed = B_TRUE;
3502 mutex_exit(&vd->vdev_dtl_lock);
3503 } else {
3504 for (int c = 0; c < vd->vdev_children; c++) {
3505 vdev_t *cvd = vd->vdev_child[c];
3506 uint64_t cmin, cmax;
3508 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
3509 thismin = MIN(thismin, cmin);
3510 thismax = MAX(thismax, cmax);
3511 needed = B_TRUE;
3516 if (needed && minp) {
3517 *minp = thismin;
3518 *maxp = thismax;
3520 return (needed);
3524 * Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
3525 * will contain either the checkpoint spacemap object or zero if none exists.
3526 * All other errors are returned to the caller.
3529 vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
3531 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
3533 if (vd->vdev_top_zap == 0) {
3534 *sm_obj = 0;
3535 return (0);
3538 int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
3539 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
3540 if (error == ENOENT) {
3541 *sm_obj = 0;
3542 error = 0;
3545 return (error);
3549 vdev_load(vdev_t *vd)
3551 int children = vd->vdev_children;
3552 int error = 0;
3553 taskq_t *tq = NULL;
3556 * It's only worthwhile to use the taskq for the root vdev, because the
3557 * slow part is metaslab_init, and that only happens for top-level
3558 * vdevs.
3560 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
3561 tq = taskq_create("vdev_load", children, minclsyspri,
3562 children, children, TASKQ_PREPOPULATE);
3566 * Recursively load all children.
3568 for (int c = 0; c < vd->vdev_children; c++) {
3569 vdev_t *cvd = vd->vdev_child[c];
3571 if (tq == NULL || vdev_uses_zvols(cvd)) {
3572 cvd->vdev_load_error = vdev_load(cvd);
3573 } else {
3574 VERIFY(taskq_dispatch(tq, vdev_load_child,
3575 cvd, TQ_SLEEP) != TASKQID_INVALID);
3579 if (tq != NULL) {
3580 taskq_wait(tq);
3581 taskq_destroy(tq);
3584 for (int c = 0; c < vd->vdev_children; c++) {
3585 int error = vd->vdev_child[c]->vdev_load_error;
3587 if (error != 0)
3588 return (error);
3591 vdev_set_deflate_ratio(vd);
3594 * On spa_load path, grab the allocation bias from our zap
3596 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3597 spa_t *spa = vd->vdev_spa;
3598 char bias_str[64];
3600 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3601 VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
3602 bias_str);
3603 if (error == 0) {
3604 ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
3605 vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
3606 } else if (error != ENOENT) {
3607 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3608 VDEV_AUX_CORRUPT_DATA);
3609 vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
3610 "failed [error=%d]",
3611 (u_longlong_t)vd->vdev_top_zap, error);
3612 return (error);
3616 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3617 spa_t *spa = vd->vdev_spa;
3618 uint64_t failfast;
3620 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3621 vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast),
3622 1, &failfast);
3623 if (error == 0) {
3624 vd->vdev_failfast = failfast & 1;
3625 } else if (error == ENOENT) {
3626 vd->vdev_failfast = vdev_prop_default_numeric(
3627 VDEV_PROP_FAILFAST);
3628 } else {
3629 vdev_dbgmsg(vd,
3630 "vdev_load: zap_lookup(top_zap=%llu) "
3631 "failed [error=%d]",
3632 (u_longlong_t)vd->vdev_top_zap, error);
3637 * Load any rebuild state from the top-level vdev zap.
3639 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
3640 error = vdev_rebuild_load(vd);
3641 if (error && error != ENOTSUP) {
3642 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3643 VDEV_AUX_CORRUPT_DATA);
3644 vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
3645 "failed [error=%d]", error);
3646 return (error);
3650 if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) {
3651 uint64_t zapobj;
3653 if (vd->vdev_top_zap != 0)
3654 zapobj = vd->vdev_top_zap;
3655 else
3656 zapobj = vd->vdev_leaf_zap;
3658 error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N,
3659 &vd->vdev_checksum_n);
3660 if (error && error != ENOENT)
3661 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3662 "failed [error=%d]", (u_longlong_t)zapobj, error);
3664 error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T,
3665 &vd->vdev_checksum_t);
3666 if (error && error != ENOENT)
3667 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3668 "failed [error=%d]", (u_longlong_t)zapobj, error);
3670 error = vdev_prop_get_int(vd, VDEV_PROP_IO_N,
3671 &vd->vdev_io_n);
3672 if (error && error != ENOENT)
3673 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3674 "failed [error=%d]", (u_longlong_t)zapobj, error);
3676 error = vdev_prop_get_int(vd, VDEV_PROP_IO_T,
3677 &vd->vdev_io_t);
3678 if (error && error != ENOENT)
3679 vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
3680 "failed [error=%d]", (u_longlong_t)zapobj, error);
3684 * If this is a top-level vdev, initialize its metaslabs.
3686 if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
3687 vdev_metaslab_group_create(vd);
3689 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
3690 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3691 VDEV_AUX_CORRUPT_DATA);
3692 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
3693 "asize=%llu", (u_longlong_t)vd->vdev_ashift,
3694 (u_longlong_t)vd->vdev_asize);
3695 return (SET_ERROR(ENXIO));
3698 error = vdev_metaslab_init(vd, 0);
3699 if (error != 0) {
3700 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
3701 "[error=%d]", error);
3702 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3703 VDEV_AUX_CORRUPT_DATA);
3704 return (error);
3707 uint64_t checkpoint_sm_obj;
3708 error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
3709 if (error == 0 && checkpoint_sm_obj != 0) {
3710 objset_t *mos = spa_meta_objset(vd->vdev_spa);
3711 ASSERT(vd->vdev_asize != 0);
3712 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
3714 error = space_map_open(&vd->vdev_checkpoint_sm,
3715 mos, checkpoint_sm_obj, 0, vd->vdev_asize,
3716 vd->vdev_ashift);
3717 if (error != 0) {
3718 vdev_dbgmsg(vd, "vdev_load: space_map_open "
3719 "failed for checkpoint spacemap (obj %llu) "
3720 "[error=%d]",
3721 (u_longlong_t)checkpoint_sm_obj, error);
3722 return (error);
3724 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3727 * Since the checkpoint_sm contains free entries
3728 * exclusively, we can use space_map_allocated() to
3729 * indicate the cumulative checkpointed space that
3730 * has been freed.
3732 vd->vdev_stat.vs_checkpoint_space =
3733 -space_map_allocated(vd->vdev_checkpoint_sm);
3734 vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
3735 vd->vdev_stat.vs_checkpoint_space;
3736 } else if (error != 0) {
3737 vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
3738 "checkpoint space map object from vdev ZAP "
3739 "[error=%d]", error);
3740 return (error);
3745 * If this is a leaf vdev, load its DTL.
3747 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
3748 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3749 VDEV_AUX_CORRUPT_DATA);
3750 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
3751 "[error=%d]", error);
3752 return (error);
3755 uint64_t obsolete_sm_object;
3756 error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
3757 if (error == 0 && obsolete_sm_object != 0) {
3758 objset_t *mos = vd->vdev_spa->spa_meta_objset;
3759 ASSERT(vd->vdev_asize != 0);
3760 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
3762 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
3763 obsolete_sm_object, 0, vd->vdev_asize, 0))) {
3764 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3765 VDEV_AUX_CORRUPT_DATA);
3766 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
3767 "obsolete spacemap (obj %llu) [error=%d]",
3768 (u_longlong_t)obsolete_sm_object, error);
3769 return (error);
3771 } else if (error != 0) {
3772 vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
3773 "space map object from vdev ZAP [error=%d]", error);
3774 return (error);
3777 return (0);
3781 * The special vdev case is used for hot spares and l2cache devices. Its
3782 * sole purpose is to set the vdev state for the associated vdev. To do this,
3783 * we make sure that we can open the underlying device, then try to read the
3784 * label, and make sure that the label is sane and that it hasn't been
3785 * repurposed to another pool.
3788 vdev_validate_aux(vdev_t *vd)
3790 nvlist_t *label;
3791 uint64_t guid, version;
3792 uint64_t state;
3794 if (!vdev_readable(vd))
3795 return (0);
3797 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
3798 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3799 VDEV_AUX_CORRUPT_DATA);
3800 return (-1);
3803 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
3804 !SPA_VERSION_IS_SUPPORTED(version) ||
3805 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
3806 guid != vd->vdev_guid ||
3807 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
3808 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3809 VDEV_AUX_CORRUPT_DATA);
3810 nvlist_free(label);
3811 return (-1);
3815 * We don't actually check the pool state here. If it's in fact in
3816 * use by another pool, we update this fact on the fly when requested.
3818 nvlist_free(label);
3819 return (0);
3822 static void
3823 vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
3825 objset_t *mos = spa_meta_objset(vd->vdev_spa);
3827 if (vd->vdev_top_zap == 0)
3828 return;
3830 uint64_t object = 0;
3831 int err = zap_lookup(mos, vd->vdev_top_zap,
3832 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
3833 if (err == ENOENT)
3834 return;
3835 VERIFY0(err);
3837 VERIFY0(dmu_object_free(mos, object, tx));
3838 VERIFY0(zap_remove(mos, vd->vdev_top_zap,
3839 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
3843 * Free the objects used to store this vdev's spacemaps, and the array
3844 * that points to them.
3846 void
3847 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
3849 if (vd->vdev_ms_array == 0)
3850 return;
3852 objset_t *mos = vd->vdev_spa->spa_meta_objset;
3853 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
3854 size_t array_bytes = array_count * sizeof (uint64_t);
3855 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
3856 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
3857 array_bytes, smobj_array, 0));
3859 for (uint64_t i = 0; i < array_count; i++) {
3860 uint64_t smobj = smobj_array[i];
3861 if (smobj == 0)
3862 continue;
3864 space_map_free_obj(mos, smobj, tx);
3867 kmem_free(smobj_array, array_bytes);
3868 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
3869 vdev_destroy_ms_flush_data(vd, tx);
3870 vd->vdev_ms_array = 0;
3873 static void
3874 vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
3876 spa_t *spa = vd->vdev_spa;
3878 ASSERT(vd->vdev_islog);
3879 ASSERT(vd == vd->vdev_top);
3880 ASSERT3U(txg, ==, spa_syncing_txg(spa));
3882 dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3884 vdev_destroy_spacemaps(vd, tx);
3885 if (vd->vdev_top_zap != 0) {
3886 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
3887 vd->vdev_top_zap = 0;
3890 dmu_tx_commit(tx);
3893 void
3894 vdev_sync_done(vdev_t *vd, uint64_t txg)
3896 metaslab_t *msp;
3897 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
3899 ASSERT(vdev_is_concrete(vd));
3901 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
3902 != NULL)
3903 metaslab_sync_done(msp, txg);
3905 if (reassess) {
3906 metaslab_sync_reassess(vd->vdev_mg);
3907 if (vd->vdev_log_mg != NULL)
3908 metaslab_sync_reassess(vd->vdev_log_mg);
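/*
 * Write out this vdev's dirty state for the given txg: obsolete mappings,
 * dirty metaslabs, and dirty DTLs. Also lazily creates the metaslab array
 * object on first use, and destroys the metadata of an empty log device
 * that is being removed.
 */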
3912 void
3913 vdev_sync(vdev_t *vd, uint64_t txg)
3915 spa_t *spa = vd->vdev_spa;
3916 vdev_t *lvd;
3917 metaslab_t *msp;
3919 ASSERT3U(txg, ==, spa->spa_syncing_txg);
3920 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3921 if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
3922 ASSERT(vd->vdev_removing ||
3923 vd->vdev_ops == &vdev_indirect_ops);
3925 vdev_indirect_sync_obsolete(vd, tx);
3928 * If the vdev is indirect, it can't have dirty
3929 * metaslabs or DTLs.
3931 if (vd->vdev_ops == &vdev_indirect_ops) {
3932 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
3933 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
3934 dmu_tx_commit(tx);
3935 return;
3939 ASSERT(vdev_is_concrete(vd));
3941 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
3942 !vd->vdev_removing) {
3943 ASSERT(vd == vd->vdev_top);
3944 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3945 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
3946 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
3947 ASSERT(vd->vdev_ms_array != 0);
3948 vdev_config_dirty(vd);
3951 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
3952 metaslab_sync(msp, txg);
3953 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
3956 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
3957 vdev_dtl_sync(lvd, txg);
3960 * If this is an empty log device being removed, destroy the
3961 * metadata associated with it.
3963 if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
3964 vdev_remove_empty_log(vd, txg);
3966 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
3967 dmu_tx_commit(tx);
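/*
 * Convert a physical block size to its allocated size on this vdev by
 * dispatching to the vdev-type-specific asize method (e.g. RAID-Z
 * inflates psize to cover parity and padding).
 */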
3970 uint64_t
3971 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
3973 return (vd->vdev_ops->vdev_op_asize(vd, psize));
3977 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
3978 * not be opened, and no I/O is attempted.
3980 int
3981 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
3983 vdev_t *vd, *tvd;
3985 spa_vdev_state_enter(spa, SCL_NONE);
3987 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3988 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
3990 if (!vd->vdev_ops->vdev_op_leaf)
3991 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
3993 tvd = vd->vdev_top;
3996 * If the user did a 'zpool offline -f' then make the fault persist across
3997 * reboots.
3999 if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
4001 * There are two kinds of forced faults: temporary and
4002 * persistent. Temporary faults go away at pool import, while
4003 * persistent faults stay set. Both types of faults can be
4004 * cleared with a zpool clear.
4006 * We can tell if a vdev is persistently faulted by looking at the
4007 * ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
4008 * import then it's a persistent fault. Otherwise, it's
4009 * temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
4010 * by setting vd->vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
4011 * tells vdev_config_generate() (which gets run later) to set
4012 * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
4014 vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
4015 vd->vdev_tmpoffline = B_FALSE;
4016 aux = VDEV_AUX_EXTERNAL;
4017 } else {
4018 vd->vdev_tmpoffline = B_TRUE;
4022 * We don't directly use the aux state here, but if we do a
4023 * vdev_reopen(), we need this value to be present to remember why we
4024 * were faulted.
4026 vd->vdev_label_aux = aux;
4029 * Faulted state takes precedence over degraded.
4031 vd->vdev_delayed_close = B_FALSE;
4032 vd->vdev_faulted = 1ULL;
4033 vd->vdev_degraded = 0ULL;
4034 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
4037 * If this device has the only valid copy of the data, then
4038 * back off and simply mark the vdev as degraded instead.
4040 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
4041 vd->vdev_degraded = 1ULL;
4042 vd->vdev_faulted = 0ULL;
4045 * If we reopen the device and it's not dead, only then do we
4046 * mark it degraded.
4048 vdev_reopen(tvd);
4050 if (vdev_readable(vd))
4051 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
4054 return (spa_vdev_state_exit(spa, vd, 0));
4058 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
4059 * user that something is wrong. The vdev continues to operate as normal as far
4060 * as I/O is concerned.
4062 int
4063 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4065 vdev_t *vd;
4067 spa_vdev_state_enter(spa, SCL_NONE);
4069 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4070 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4072 if (!vd->vdev_ops->vdev_op_leaf)
4073 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4076 * If the vdev is already faulted, then don't do anything.
4078 if (vd->vdev_faulted || vd->vdev_degraded)
4079 return (spa_vdev_state_exit(spa, NULL, 0));
4081 vd->vdev_degraded = 1ULL;
4082 if (!vdev_is_dead(vd))
4083 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
4084 aux);
4086 return (spa_vdev_state_exit(spa, vd, 0));
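/*
 * Mark the given vdev as wanting removal and ask the async thread to
 * carry it out (SPA_ASYNC_REMOVE). No-op if the vdev is already removed.
 */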
4089 int
4090 vdev_remove_wanted(spa_t *spa, uint64_t guid)
4092 vdev_t *vd;
4094 spa_vdev_state_enter(spa, SCL_NONE);
4096 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4097 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4100 * If the vdev is already removed, then don't do anything.
4102 if (vd->vdev_removed)
4103 return (spa_vdev_state_exit(spa, NULL, 0));
4105 vd->vdev_remove_wanted = B_TRUE;
4106 spa_async_request(spa, SPA_ASYNC_REMOVE);
4108 return (spa_vdev_state_exit(spa, vd, 0));
4113 * Online the given vdev.
4115 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
4116 * spare device should be detached when the device finishes resilvering.
4117 * Second, the online should be treated like a 'test' online case, so no FMA
4118 * events are generated if the device fails to open.
4120 int
4121 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
4123 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
4124 boolean_t wasoffline;
4125 vdev_state_t oldstate;
4127 spa_vdev_state_enter(spa, SCL_NONE);
4129 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4130 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4132 if (!vd->vdev_ops->vdev_op_leaf)
4133 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4135 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
4136 oldstate = vd->vdev_state;
4138 tvd = vd->vdev_top;
4139 vd->vdev_offline = B_FALSE;
4140 vd->vdev_tmpoffline = B_FALSE;
4141 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
4142 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
4144 /* XXX - L2ARC 1.0 does not support expansion */
4145 if (!vd->vdev_aux) {
4146 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4147 pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
4148 spa->spa_autoexpand);
4149 vd->vdev_expansion_time = gethrestime_sec();
4152 vdev_reopen(tvd);
4153 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
4155 if (!vd->vdev_aux) {
4156 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4157 pvd->vdev_expanding = B_FALSE;
4160 if (newstate)
4161 *newstate = vd->vdev_state;
4162 if ((flags & ZFS_ONLINE_UNSPARE) &&
4163 !vdev_is_dead(vd) && vd->vdev_parent &&
4164 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4165 vd->vdev_parent->vdev_child[0] == vd)
4166 vd->vdev_unspare = B_TRUE;
4168 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
4170 /* XXX - L2ARC 1.0 does not support expansion */
4171 if (vd->vdev_aux)
4172 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
4173 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
4176 /* Restart initializing if necessary */
4177 mutex_enter(&vd->vdev_initialize_lock);
4178 if (vdev_writeable(vd) &&
4179 vd->vdev_initialize_thread == NULL &&
4180 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
4181 (void) vdev_initialize(vd);
4183 mutex_exit(&vd->vdev_initialize_lock);
4186 * Restart trimming if necessary. We do not restart trimming for cache
4187 * devices here. This is triggered by l2arc_rebuild_vdev()
4188 * asynchronously for the whole device or in l2arc_evict() as it evicts
4189 * space for upcoming writes.
4191 mutex_enter(&vd->vdev_trim_lock);
4192 if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
4193 vd->vdev_trim_thread == NULL &&
4194 vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
4195 (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
4196 vd->vdev_trim_secure);
4198 mutex_exit(&vd->vdev_trim_lock);
4200 if (wasoffline ||
4201 (oldstate < VDEV_STATE_DEGRADED &&
4202 vd->vdev_state >= VDEV_STATE_DEGRADED)) {
4203 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
4206 * Asynchronously detach spare vdev if resilver or
4207 * rebuild is not required
4209 if (vd->vdev_unspare &&
4210 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4211 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
4212 !vdev_rebuild_active(tvd))
4213 spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
4215 return (spa_vdev_state_exit(spa, vd, 0));
4218 static int
4219 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
4221 vdev_t *vd, *tvd;
4222 int error = 0;
4223 uint64_t generation;
4224 metaslab_group_t *mg;
4226 top:
4227 spa_vdev_state_enter(spa, SCL_ALLOC);
4229 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4230 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4232 if (!vd->vdev_ops->vdev_op_leaf)
4233 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4235 if (vd->vdev_ops == &vdev_draid_spare_ops)
4236 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
4238 tvd = vd->vdev_top;
4239 mg = tvd->vdev_mg;
4240 generation = spa->spa_config_generation + 1;
4243 * If the device isn't already offline, try to offline it.
4245 if (!vd->vdev_offline) {
4247 * If this device has the only valid copy of some data,
4248 * don't allow it to be offlined. Log devices are always
4249 * expendable.
4251 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
4252 vdev_dtl_required(vd))
4253 return (spa_vdev_state_exit(spa, NULL,
4254 SET_ERROR(EBUSY)));
4257 * If the top-level is a slog and it has had allocations
4258 * then proceed. We check that the vdev's metaslab group
4259 * is not NULL since it's possible that we may have just
4260 * added this vdev but not yet initialized its metaslabs.
4262 if (tvd->vdev_islog && mg != NULL) {
4264 * Prevent any future allocations.
4266 ASSERT3P(tvd->vdev_log_mg, ==, NULL);
4267 metaslab_group_passivate(mg);
4268 (void) spa_vdev_state_exit(spa, vd, 0);
4270 error = spa_reset_logs(spa);
4273 * If the log device was successfully reset but has
4274 * checkpointed data, do not offline it.
4276 if (error == 0 &&
4277 tvd->vdev_checkpoint_sm != NULL) {
4278 ASSERT3U(space_map_allocated(
4279 tvd->vdev_checkpoint_sm), !=, 0);
4280 error = ZFS_ERR_CHECKPOINT_EXISTS;
4283 spa_vdev_state_enter(spa, SCL_ALLOC);
4286 * Check to see if the config has changed.
4288 if (error || generation != spa->spa_config_generation) {
4289 metaslab_group_activate(mg);
4290 if (error)
4291 return (spa_vdev_state_exit(spa,
4292 vd, error));
4293 (void) spa_vdev_state_exit(spa, vd, 0);
4294 goto top;
4296 ASSERT0(tvd->vdev_stat.vs_alloc);
4300 * Offline this device and reopen its top-level vdev.
4301 * If the top-level vdev is a log device then just offline
4302 * it. Otherwise, if this action results in the top-level
4303 * vdev becoming unusable, undo it and fail the request.
4305 vd->vdev_offline = B_TRUE;
4306 vdev_reopen(tvd);
4308 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
4309 vdev_is_dead(tvd)) {
4310 vd->vdev_offline = B_FALSE;
4311 vdev_reopen(tvd);
4312 return (spa_vdev_state_exit(spa, NULL,
4313 SET_ERROR(EBUSY)));
4317 * Add the device back into the metaslab rotor so that
4318 * once we online the device it's open for business.
4320 if (tvd->vdev_islog && mg != NULL)
4321 metaslab_group_activate(mg);
4324 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
4326 return (spa_vdev_state_exit(spa, vd, 0));
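/*
 * Offline the vdev identified by 'guid'. A thin wrapper that takes
 * spa_vdev_top_lock, since vdev_offline_locked() may drop and re-acquire
 * the vdev state lock while quiescing log devices.
 */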
4329 int
4330 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
4332 int error;
4334 mutex_enter(&spa->spa_vdev_top_lock);
4335 error = vdev_offline_locked(spa, guid, flags);
4336 mutex_exit(&spa->spa_vdev_top_lock);
4338 return (error);
4342 * Clear the error counts associated with this vdev. Unlike vdev_online() and
4343 * vdev_offline(), we assume the spa config is locked. We also clear all
4344 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
4346 void
4347 vdev_clear(spa_t *spa, vdev_t *vd)
4349 vdev_t *rvd = spa->spa_root_vdev;
4351 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
4353 if (vd == NULL)
4354 vd = rvd;
4356 vd->vdev_stat.vs_read_errors = 0;
4357 vd->vdev_stat.vs_write_errors = 0;
4358 vd->vdev_stat.vs_checksum_errors = 0;
4359 vd->vdev_stat.vs_slow_ios = 0;
4361 for (int c = 0; c < vd->vdev_children; c++)
4362 vdev_clear(spa, vd->vdev_child[c]);
4365 * It makes no sense to "clear" an indirect or removed vdev.
4367 if (!vdev_is_concrete(vd) || vd->vdev_removed)
4368 return;
4371 * If we're in the FAULTED state or have experienced failed I/O, then
4372 * clear the persistent state and attempt to reopen the device. We
4373 * also mark the vdev config dirty, so that the new faulted state is
4374 * written out to disk.
4376 if (vd->vdev_faulted || vd->vdev_degraded ||
4377 !vdev_readable(vd) || !vdev_writeable(vd)) {
4379 * When reopening in response to a clear event, it may be due to
4380 * a fmadm repair request. In this case, if the device is
4381 * still broken, we want to post the ereport again.
4383 vd->vdev_forcefault = B_TRUE;
4385 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
4386 vd->vdev_cant_read = B_FALSE;
4387 vd->vdev_cant_write = B_FALSE;
4388 vd->vdev_stat.vs_aux = 0;
4390 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
4392 vd->vdev_forcefault = B_FALSE;
4394 if (vd != rvd && vdev_writeable(vd->vdev_top))
4395 vdev_state_dirty(vd->vdev_top);
4397 /* If a resilver isn't required, check if vdevs can be culled */
4398 if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
4399 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4400 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
4401 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
4403 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
4407 * When clearing a FMA-diagnosed fault, we always want to
4408 * unspare the device, as we assume that the original spare was
4409 * done in response to the FMA fault.
4411 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
4412 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4413 vd->vdev_parent->vdev_child[0] == vd)
4414 vd->vdev_unspare = B_TRUE;
4416 /* Clear recent error events cache (i.e. duplicate events tracking) */
4417 zfs_ereport_clear(spa, vd);
4420 boolean_t
4421 vdev_is_dead(vdev_t *vd)
4424 * Holes and missing devices are always considered "dead".
4425 * This simplifies the code since we don't have to check for
4426 * these types of devices in the various code paths.
4427 * Instead we rely on the fact that we skip over dead devices
4428 * before issuing I/O to them.
4430 return (vd->vdev_state < VDEV_STATE_DEGRADED ||
4431 vd->vdev_ops == &vdev_hole_ops ||
4432 vd->vdev_ops == &vdev_missing_ops);
4435 boolean_t
4436 vdev_readable(vdev_t *vd)
4438 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
4441 boolean_t
4442 vdev_writeable(vdev_t *vd)
4444 return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
4445 vdev_is_concrete(vd));
4448 boolean_t
4449 vdev_allocatable(vdev_t *vd)
4451 uint64_t state = vd->vdev_state;
4454 * We currently allow allocations from vdevs which may be in the
4455 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
4456 * fails to reopen then we'll catch it later when we're holding
4457 * the proper locks. Note that we have to get the vdev state
4458 * in a local variable because although it changes atomically,
4459 * we're asking two separate questions about it.
4461 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
4462 !vd->vdev_cant_write && vdev_is_concrete(vd) &&
4463 vd->vdev_mg->mg_initialized);
4466 boolean_t
4467 vdev_accessible(vdev_t *vd, zio_t *zio)
4469 ASSERT(zio->io_vd == vd);
4471 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
4472 return (B_FALSE);
4474 if (zio->io_type == ZIO_TYPE_READ)
4475 return (!vd->vdev_cant_read);
4477 if (zio->io_type == ZIO_TYPE_WRITE)
4478 return (!vd->vdev_cant_write);
4480 return (B_TRUE);
4483 static void
4484 vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
4487 * Exclude the dRAID spare when aggregating to avoid double counting
4488 * the ops and bytes. These IOs are counted by the physical leaves.
4490 if (cvd->vdev_ops == &vdev_draid_spare_ops)
4491 return;
4493 for (int t = 0; t < VS_ZIO_TYPES; t++) {
4494 vs->vs_ops[t] += cvs->vs_ops[t];
4495 vs->vs_bytes[t] += cvs->vs_bytes[t];
4498 cvs->vs_scan_removing = cvd->vdev_removing;
4502 * Get extended stats
4504 static void
4505 vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
4507 (void) cvd;
4509 int t, b;
4510 for (t = 0; t < ZIO_TYPES; t++) {
4511 for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
4512 vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
4514 for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
4515 vsx->vsx_total_histo[t][b] +=
4516 cvsx->vsx_total_histo[t][b];
4520 for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
4521 for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
4522 vsx->vsx_queue_histo[t][b] +=
4523 cvsx->vsx_queue_histo[t][b];
4525 vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
4526 vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
4528 for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
4529 vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
4531 for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
4532 vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
4537 boolean_t
4538 vdev_is_spacemap_addressable(vdev_t *vd)
4540 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
4541 return (B_TRUE);
4544 * If double-word space map entries are not enabled we assume
4545 * 47 bits of the space map entry are dedicated to the entry's
4546 * offset (see SM_OFFSET_BITS in space_map.h). We then use that
4547 * to calculate the maximum address that can be described by a
4548 * space map entry for the given device.
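* For example, with ashift=9 a single-word entry can describe
* offsets up to 1ULL << (9 + 47) = 2^56 bytes (64 PiB).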
4550 uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
4552 if (shift >= 63) /* detect potential overflow */
4553 return (B_TRUE);
4555 return (vd->vdev_asize < (1ULL << shift));
4559 * Get statistics for the given vdev.
4561 static void
4562 vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
4564 int t;
4566 * If we're getting stats on the root vdev, aggregate the I/O counts
4567 * over all top-level vdevs (i.e. the direct children of the root).
4569 if (!vd->vdev_ops->vdev_op_leaf) {
4570 if (vs) {
4571 memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
4572 memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
4574 if (vsx)
4575 memset(vsx, 0, sizeof (*vsx));
4577 for (int c = 0; c < vd->vdev_children; c++) {
4578 vdev_t *cvd = vd->vdev_child[c];
4579 vdev_stat_t *cvs = &cvd->vdev_stat;
4580 vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
4582 vdev_get_stats_ex_impl(cvd, cvs, cvsx);
4583 if (vs)
4584 vdev_get_child_stat(cvd, vs, cvs);
4585 if (vsx)
4586 vdev_get_child_stat_ex(cvd, vsx, cvsx);
4588 } else {
4590 * We're a leaf. Just copy our ZIO active queue stats in. The
4591 * other leaf stats are updated in vdev_stat_update().
4593 if (!vsx)
4594 return;
4596 memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
4598 for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
4599 vsx->vsx_active_queue[t] =
4600 vd->vdev_queue.vq_class[t].vqc_active;
4601 vsx->vsx_pend_queue[t] = avl_numnodes(
4602 &vd->vdev_queue.vq_class[t].vqc_queued_tree);
4607 void
4608 vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
4610 vdev_t *tvd = vd->vdev_top;
4611 mutex_enter(&vd->vdev_stat_lock);
4612 if (vs) {
4613 memcpy(vs, &vd->vdev_stat, sizeof (*vs));
4614 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
4615 vs->vs_state = vd->vdev_state;
4616 vs->vs_rsize = vdev_get_min_asize(vd);
4618 if (vd->vdev_ops->vdev_op_leaf) {
4619 vs->vs_pspace = vd->vdev_psize;
4620 vs->vs_rsize += VDEV_LABEL_START_SIZE +
4621 VDEV_LABEL_END_SIZE;
4623 * Report initializing progress. Since we don't
4624 * have the initializing locks held, this is only
4625 * an estimate (although a fairly accurate one).
4627 vs->vs_initialize_bytes_done =
4628 vd->vdev_initialize_bytes_done;
4629 vs->vs_initialize_bytes_est =
4630 vd->vdev_initialize_bytes_est;
4631 vs->vs_initialize_state = vd->vdev_initialize_state;
4632 vs->vs_initialize_action_time =
4633 vd->vdev_initialize_action_time;
4636 * Report manual TRIM progress. Since we don't have
4637 * the manual TRIM locks held, this is only an
4638 * estimate (although a fairly accurate one).
4640 vs->vs_trim_notsup = !vd->vdev_has_trim;
4641 vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
4642 vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
4643 vs->vs_trim_state = vd->vdev_trim_state;
4644 vs->vs_trim_action_time = vd->vdev_trim_action_time;
4646 /* Set when there is a deferred resilver. */
4647 vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
4651 * Report expandable space on top-level, non-auxiliary devices
4652 * only. The expandable space is reported in terms of metaslab
4653 * sized units since that determines how much space the pool
4654 * can expand.
4656 if (vd->vdev_aux == NULL && tvd != NULL) {
4657 vs->vs_esize = P2ALIGN(
4658 vd->vdev_max_asize - vd->vdev_asize,
4659 1ULL << tvd->vdev_ms_shift);
4662 vs->vs_configured_ashift = vd->vdev_top != NULL
4663 ? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
4664 vs->vs_logical_ashift = vd->vdev_logical_ashift;
4665 if (vd->vdev_physical_ashift <= ASHIFT_MAX)
4666 vs->vs_physical_ashift = vd->vdev_physical_ashift;
4667 else
4668 vs->vs_physical_ashift = 0;
4671 * Report fragmentation and rebuild progress for top-level,
4672 * non-auxiliary, concrete devices.
4674 if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
4675 vdev_is_concrete(vd)) {
4677 * The vdev fragmentation rating doesn't take into
4678 * account the embedded slog metaslab (vdev_log_mg).
4679 * Since it's only one metaslab, it would have a tiny
4680 * impact on the overall fragmentation.
4682 vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
4683 vd->vdev_mg->mg_fragmentation : 0;
4685 vs->vs_noalloc = MAX(vd->vdev_noalloc,
4686 tvd ? tvd->vdev_noalloc : 0);
4689 vdev_get_stats_ex_impl(vd, vs, vsx);
4690 mutex_exit(&vd->vdev_stat_lock);
4693 void
4694 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
4696 return (vdev_get_stats_ex(vd, vs, NULL));
4699 void
4700 vdev_clear_stats(vdev_t *vd)
4702 mutex_enter(&vd->vdev_stat_lock);
4703 vd->vdev_stat.vs_space = 0;
4704 vd->vdev_stat.vs_dspace = 0;
4705 vd->vdev_stat.vs_alloc = 0;
4706 mutex_exit(&vd->vdev_stat_lock);
4709 void
4710 vdev_scan_stat_init(vdev_t *vd)
4712 vdev_stat_t *vs = &vd->vdev_stat;
4714 for (int c = 0; c < vd->vdev_children; c++)
4715 vdev_scan_stat_init(vd->vdev_child[c]);
4717 mutex_enter(&vd->vdev_stat_lock);
4718 vs->vs_scan_processed = 0;
4719 mutex_exit(&vd->vdev_stat_lock);
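/*
 * Update I/O statistics for the vdev associated with this zio; called on
 * I/O completion. On success this accumulates ops/bytes and latency
 * histograms at the leaf; on failure it records the error and dirties
 * the DTLs so the damage can be repaired later.
 */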
4722 void
4723 vdev_stat_update(zio_t *zio, uint64_t psize)
4725 spa_t *spa = zio->io_spa;
4726 vdev_t *rvd = spa->spa_root_vdev;
4727 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
4728 vdev_t *pvd;
4729 uint64_t txg = zio->io_txg;
4730 /* Suppress ASAN false positive */
4731 #ifdef __SANITIZE_ADDRESS__
4732 vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
4733 vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
4734 #else
4735 vdev_stat_t *vs = &vd->vdev_stat;
4736 vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
4737 #endif
4738 zio_type_t type = zio->io_type;
4739 int flags = zio->io_flags;
4742 * If this i/o is a gang leader, it didn't do any actual work.
4744 if (zio->io_gang_tree)
4745 return;
4747 if (zio->io_error == 0) {
4749 * If this is a root i/o, don't count it -- we've already
4750 * counted the top-level vdevs, and vdev_get_stats() will
4751 * aggregate them when asked. This reduces contention on
4752 * the root vdev_stat_lock and implicitly handles blocks
4753 * that compress away to holes, for which there is no i/o.
4754 * (Holes never create vdev children, so all the counters
4755 * remain zero, which is what we want.)
4757 * Note: this only applies to successful i/o (io_error == 0)
4758 * because unlike i/o counts, errors are not additive.
4759 * When reading a ditto block, for example, failure of
4760 * one top-level vdev does not imply a root-level error.
4762 if (vd == rvd)
4763 return;
4765 ASSERT(vd == zio->io_vd);
4767 if (flags & ZIO_FLAG_IO_BYPASS)
4768 return;
4770 mutex_enter(&vd->vdev_stat_lock);
4772 if (flags & ZIO_FLAG_IO_REPAIR) {
4774 * Repair is the result of a resilver issued by the
4775 * scan thread (spa_sync).
4777 if (flags & ZIO_FLAG_SCAN_THREAD) {
4778 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
4779 dsl_scan_phys_t *scn_phys = &scn->scn_phys;
4780 uint64_t *processed = &scn_phys->scn_processed;
4782 if (vd->vdev_ops->vdev_op_leaf)
4783 atomic_add_64(processed, psize);
4784 vs->vs_scan_processed += psize;
4788 * Repair is the result of a rebuild issued by the
4789 * rebuild thread (vdev_rebuild_thread). To avoid
4790 * double counting repaired bytes the virtual dRAID
4791 * spare vdev is excluded from the processed bytes.
4793 if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
4794 vdev_t *tvd = vd->vdev_top;
4795 vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
4796 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
4797 uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
4799 if (vd->vdev_ops->vdev_op_leaf &&
4800 vd->vdev_ops != &vdev_draid_spare_ops) {
4801 atomic_add_64(rebuilt, psize);
4803 vs->vs_rebuild_processed += psize;
4806 if (flags & ZIO_FLAG_SELF_HEAL)
4807 vs->vs_self_healed += psize;
4811 * The bytes/ops/histograms are recorded at the leaf level and
4812 * aggregated into the higher level vdevs in vdev_get_stats().
4814 if (vd->vdev_ops->vdev_op_leaf &&
4815 (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
4816 zio_type_t vs_type = type;
4817 zio_priority_t priority = zio->io_priority;
4820 * TRIM ops and bytes are reported to user space as
4821 * ZIO_TYPE_IOCTL. This is done to preserve the
4822 * vdev_stat_t structure layout for user space.
4824 if (type == ZIO_TYPE_TRIM)
4825 vs_type = ZIO_TYPE_IOCTL;
4828 * Solely for the purposes of 'zpool iostat -lqrw'
4829 * reporting, use the priority to categorize the IO.
4830 * Only the following are reported to user space:
4832 * ZIO_PRIORITY_SYNC_READ,
4833 * ZIO_PRIORITY_SYNC_WRITE,
4834 * ZIO_PRIORITY_ASYNC_READ,
4835 * ZIO_PRIORITY_ASYNC_WRITE,
4836 * ZIO_PRIORITY_SCRUB,
4837 * ZIO_PRIORITY_TRIM,
4838 * ZIO_PRIORITY_REBUILD.
4840 if (priority == ZIO_PRIORITY_INITIALIZING) {
4841 ASSERT3U(type, ==, ZIO_TYPE_WRITE);
4842 priority = ZIO_PRIORITY_ASYNC_WRITE;
4843 } else if (priority == ZIO_PRIORITY_REMOVAL) {
4844 priority = ((type == ZIO_TYPE_WRITE) ?
4845 ZIO_PRIORITY_ASYNC_WRITE :
4846 ZIO_PRIORITY_ASYNC_READ);
4849 vs->vs_ops[vs_type]++;
4850 vs->vs_bytes[vs_type] += psize;
4852 if (flags & ZIO_FLAG_DELEGATED) {
4853 vsx->vsx_agg_histo[priority]
4854 [RQ_HISTO(zio->io_size)]++;
4855 } else {
4856 vsx->vsx_ind_histo[priority]
4857 [RQ_HISTO(zio->io_size)]++;
4860 if (zio->io_delta && zio->io_delay) {
4861 vsx->vsx_queue_histo[priority]
4862 [L_HISTO(zio->io_delta - zio->io_delay)]++;
4863 vsx->vsx_disk_histo[type]
4864 [L_HISTO(zio->io_delay)]++;
4865 vsx->vsx_total_histo[type]
4866 [L_HISTO(zio->io_delta)]++;
4870 mutex_exit(&vd->vdev_stat_lock);
4871 return;
4874 if (flags & ZIO_FLAG_SPECULATIVE)
4875 return;
4878 * If this is an I/O error that is going to be retried, then ignore the
4879 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
4880 * hard errors, when in reality they can happen for any number of
4881 * innocuous reasons (bus resets, MPxIO link failure, etc).
4883 if (zio->io_error == EIO &&
4884 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
4885 return;
4888 * Intent log writes won't propagate their error to the root
4889 * I/O so don't mark these types of failures as pool-level
4890 * errors.
4892 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
4893 return;
4895 if (type == ZIO_TYPE_WRITE && txg != 0 &&
4896 (!(flags & ZIO_FLAG_IO_REPAIR) ||
4897 (flags & ZIO_FLAG_SCAN_THREAD) ||
4898 spa->spa_claiming)) {
4900 * This is either a normal write (not a repair), or it's
4901 * a repair induced by the scrub thread, or it's a repair
4902 * made by zil_claim() during spa_load() in the first txg.
4903 * In the normal case, we commit the DTL change in the same
4904 * txg as the block was born. In the scrub-induced repair
4905 * case, we know that scrubs run in first-pass syncing context,
4906 * so we commit the DTL change in spa_syncing_txg(spa).
4907 * In the zil_claim() case, we commit in spa_first_txg(spa).
4909 * We currently do not make DTL entries for failed spontaneous
4910 * self-healing writes triggered by normal (non-scrubbing)
4911 * reads, because we have no transactional context in which to
4912 * do so -- and it's not clear that it'd be desirable anyway.
4914 if (vd->vdev_ops->vdev_op_leaf) {
4915 uint64_t commit_txg = txg;
4916 if (flags & ZIO_FLAG_SCAN_THREAD) {
4917 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
4918 ASSERT(spa_sync_pass(spa) == 1);
4919 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
4920 commit_txg = spa_syncing_txg(spa);
4921 } else if (spa->spa_claiming) {
4922 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
4923 commit_txg = spa_first_txg(spa);
4925 ASSERT(commit_txg >= spa_syncing_txg(spa));
4926 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
4927 return;
4928 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4929 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
4930 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
4932 if (vd != rvd)
4933 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
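/*
 * Express a raw space delta in "deflated" units: SPA_MINBLOCKSIZE
 * (512-byte) chunks scaled by the vdev's deflate ratio, matching the
 * accounting used for vs_dspace.
 */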
4937 int64_t
4938 vdev_deflated_space(vdev_t *vd, int64_t space)
4940 ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
4941 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
4943 return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
4947 * Update the in-core space usage stats for this vdev, its metaslab class,
4948 * and the root vdev.
4950 void
4951 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
4952 int64_t space_delta)
4954 (void) defer_delta;
4955 int64_t dspace_delta;
4956 spa_t *spa = vd->vdev_spa;
4957 vdev_t *rvd = spa->spa_root_vdev;
4959 ASSERT(vd == vd->vdev_top);
4962 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
4963 * factor. We must calculate this here and not at the root vdev
4964 * because the root vdev's psize-to-asize is simply the max of its
4965 * children's, thus not accurate enough for us.
4967 dspace_delta = vdev_deflated_space(vd, space_delta);
4969 mutex_enter(&vd->vdev_stat_lock);
4970 /* ensure we won't underflow */
4971 if (alloc_delta < 0) {
4972 ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
4975 vd->vdev_stat.vs_alloc += alloc_delta;
4976 vd->vdev_stat.vs_space += space_delta;
4977 vd->vdev_stat.vs_dspace += dspace_delta;
4978 mutex_exit(&vd->vdev_stat_lock);
4980 /* every class but log contributes to root space stats */
4981 if (vd->vdev_mg != NULL && !vd->vdev_islog) {
4982 ASSERT(!vd->vdev_isl2cache);
4983 mutex_enter(&rvd->vdev_stat_lock);
4984 rvd->vdev_stat.vs_alloc += alloc_delta;
4985 rvd->vdev_stat.vs_space += space_delta;
4986 rvd->vdev_stat.vs_dspace += dspace_delta;
4987 mutex_exit(&rvd->vdev_stat_lock);
4989 /* Note: metaslab_class_space_update moved to metaslab_space_update */
4993 * Mark a top-level vdev's config as dirty, placing it on the dirty list
4994 * so that it will be written out next time the vdev configuration is synced.
4995 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
4997 void
4998 vdev_config_dirty(vdev_t *vd)
5000 spa_t *spa = vd->vdev_spa;
5001 vdev_t *rvd = spa->spa_root_vdev;
5002 int c;
5004 ASSERT(spa_writeable(spa));
5007 * If this is an aux vdev (as with l2cache and spare devices), then we
5008 * update the vdev config manually and set the sync flag.
5010 if (vd->vdev_aux != NULL) {
5011 spa_aux_vdev_t *sav = vd->vdev_aux;
5012 nvlist_t **aux;
5013 uint_t naux;
5015 for (c = 0; c < sav->sav_count; c++) {
5016 if (sav->sav_vdevs[c] == vd)
5017 break;
5020 if (c == sav->sav_count) {
5022 * We're being removed. There's nothing more to do.
5024 ASSERT(sav->sav_sync == B_TRUE);
5025 return;
5028 sav->sav_sync = B_TRUE;
5030 if (nvlist_lookup_nvlist_array(sav->sav_config,
5031 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
5032 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
5033 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
5036 ASSERT(c < naux);
5039 * Setting the nvlist in the middle of the array is a little
5040 * sketchy, but it will work.
5042 nvlist_free(aux[c]);
5043 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
5045 return;
5049 * The dirty list is protected by the SCL_CONFIG lock. The caller
5050 * must either hold SCL_CONFIG as writer, or must be the sync thread
5051 * (which holds SCL_CONFIG as reader). There's only one sync thread,
5052 * so this is sufficient to ensure mutual exclusion.
5054 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5055 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5056 spa_config_held(spa, SCL_CONFIG, RW_READER)));
5058 if (vd == rvd) {
5059 for (c = 0; c < rvd->vdev_children; c++)
5060 vdev_config_dirty(rvd->vdev_child[c]);
5061 } else {
5062 ASSERT(vd == vd->vdev_top);
5064 if (!list_link_active(&vd->vdev_config_dirty_node) &&
5065 vdev_is_concrete(vd)) {
5066 list_insert_head(&spa->spa_config_dirty_list, vd);
5071 void
5072 vdev_config_clean(vdev_t *vd)
5074 spa_t *spa = vd->vdev_spa;
5076 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5077 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5078 spa_config_held(spa, SCL_CONFIG, RW_READER)));
5080 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
5081 list_remove(&spa->spa_config_dirty_list, vd);
5085 * Mark a top-level vdev's state as dirty, so that the next pass of
5086 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
5087 * the state changes from larger config changes because they require
5088 * much less locking, and are often needed for administrative actions.
5090 void
5091 vdev_state_dirty(vdev_t *vd)
5093 spa_t *spa = vd->vdev_spa;
5095 ASSERT(spa_writeable(spa));
5096 ASSERT(vd == vd->vdev_top);
5099 * The state list is protected by the SCL_STATE lock. The caller
5100 * must either hold SCL_STATE as writer, or must be the sync thread
5101 * (which holds SCL_STATE as reader). There's only one sync thread,
5102 * so this is sufficient to ensure mutual exclusion.
5104 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5105 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5106 spa_config_held(spa, SCL_STATE, RW_READER)));
5108 if (!list_link_active(&vd->vdev_state_dirty_node) &&
5109 vdev_is_concrete(vd))
5110 list_insert_head(&spa->spa_state_dirty_list, vd);
5113 void
5114 vdev_state_clean(vdev_t *vd)
5116 spa_t *spa = vd->vdev_spa;
5118 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5119 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5120 spa_config_held(spa, SCL_STATE, RW_READER)));
5122 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
5123 list_remove(&spa->spa_state_dirty_list, vd);
5127 * Propagate vdev state up from children to parent.
5129 void
5130 vdev_propagate_state(vdev_t *vd)
5132 spa_t *spa = vd->vdev_spa;
5133 vdev_t *rvd = spa->spa_root_vdev;
5134 int degraded = 0, faulted = 0;
5135 int corrupted = 0;
5136 vdev_t *child;
5138 if (vd->vdev_children > 0) {
5139 for (int c = 0; c < vd->vdev_children; c++) {
5140 child = vd->vdev_child[c];
5143 * Don't factor holes or indirect vdevs into the
5144 * decision.
5146 if (!vdev_is_concrete(child))
5147 continue;
5149 if (!vdev_readable(child) ||
5150 (!vdev_writeable(child) && spa_writeable(spa))) {
5152 * Root special: if there is a top-level log
5153 * device, treat the root vdev as if it were
5154 * degraded.
5156 if (child->vdev_islog && vd == rvd)
5157 degraded++;
5158 else
5159 faulted++;
5160 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
5161 degraded++;
5164 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
5165 corrupted++;
5168 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
5171 * Root special: if there is a top-level vdev that cannot be
5172 * opened due to corrupted metadata, then propagate the root
5173 * vdev's aux state as 'corrupt' rather than 'insufficient
5174 * replicas'.
5176 if (corrupted && vd == rvd &&
5177 rvd->vdev_state == VDEV_STATE_CANT_OPEN)
5178 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
5179 VDEV_AUX_CORRUPT_DATA);
5182 if (vd->vdev_parent)
5183 vdev_propagate_state(vd->vdev_parent);
5187 * Set a vdev's state. If this is during an open, we don't update the parent
5188 * state, because we're in the process of opening children depth-first.
5189 * Otherwise, we propagate the change to the parent.
5191 * If this routine places a device in a faulted state, an appropriate ereport is
5192 * generated.
5194 void
5195 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
5197 uint64_t save_state;
5198 spa_t *spa = vd->vdev_spa;
5200 if (state == vd->vdev_state) {
5202 * Since vdev_offline() code path is already in an offline
5203 * state we can miss a statechange event to OFFLINE. Check
5204 * the previous state to catch this condition.
5206 if (vd->vdev_ops->vdev_op_leaf &&
5207 (state == VDEV_STATE_OFFLINE) &&
5208 (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
5209 /* post an offline state change */
5210 zfs_post_state_change(spa, vd, vd->vdev_prevstate);
5212 vd->vdev_stat.vs_aux = aux;
5213 return;
5216 save_state = vd->vdev_state;
5218 vd->vdev_state = state;
5219 vd->vdev_stat.vs_aux = aux;
5222 * If we are setting the vdev state to anything but an open state, then
5223 * always close the underlying device unless the device has requested
5224 * a delayed close (i.e. we're about to remove or fault the device).
5225 * Otherwise, we keep accessible but invalid devices open forever.
5226 * We don't call vdev_close() itself, because that implies some extra
5227 * checks (offline, etc) that we don't want here. This is limited to
5228 * leaf devices, because otherwise closing the device will affect other
5229 * children.
5231 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
5232 vd->vdev_ops->vdev_op_leaf)
5233 vd->vdev_ops->vdev_op_close(vd);
5235 if (vd->vdev_removed &&
5236 state == VDEV_STATE_CANT_OPEN &&
5237 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
5239 * If the previous state is set to VDEV_STATE_REMOVED, then this
5240 * device was previously marked removed and someone attempted to
5241 * reopen it. If this failed due to a nonexistent device, then
5242 * keep the device in the REMOVED state. We also leave it in that state if
5243 * it is one of our special test online cases, which is only
5244 * attempting to online the device and shouldn't generate an FMA
5245 * fault.
5247 vd->vdev_state = VDEV_STATE_REMOVED;
5248 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
5249 } else if (state == VDEV_STATE_REMOVED) {
5250 vd->vdev_removed = B_TRUE;
5251 } else if (state == VDEV_STATE_CANT_OPEN) {
5253 * If we fail to open a vdev during an import or recovery, we
5254 * mark it as "not available", which signifies that it was
5255 * never there to begin with. Failure to open such a device
5256 * is not considered an error.
5258 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
5259 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
5260 vd->vdev_ops->vdev_op_leaf)
5261 vd->vdev_not_present = 1;
5264 * Post the appropriate ereport. If the 'prevstate' field is
5265 * set to something other than VDEV_STATE_UNKNOWN, it indicates
5266 * that this is part of a vdev_reopen(). In this case, we don't
5267 * want to post the ereport if the device was already in the
5268 * CANT_OPEN state beforehand.
5270 * If the 'checkremove' flag is set, then this is an attempt to
5271 * online the device in response to an insertion event. If we
5272 * hit this case, then we have detected an insertion event for a
5273 * faulted or offline device that wasn't in the removed state.
5274 * In this scenario, we don't post an ereport because we are
5275 * about to replace the device, or attempt an online with
5276 * vdev_forcefault, which will generate the fault for us.
5278 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
5279 !vd->vdev_not_present && !vd->vdev_checkremove &&
5280 vd != spa->spa_root_vdev) {
5281 const char *class;
5283 switch (aux) {
5284 case VDEV_AUX_OPEN_FAILED:
5285 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
5286 break;
5287 case VDEV_AUX_CORRUPT_DATA:
5288 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
5289 break;
5290 case VDEV_AUX_NO_REPLICAS:
5291 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
5292 break;
5293 case VDEV_AUX_BAD_GUID_SUM:
5294 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
5295 break;
5296 case VDEV_AUX_TOO_SMALL:
5297 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
5298 break;
5299 case VDEV_AUX_BAD_LABEL:
5300 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
5301 break;
5302 case VDEV_AUX_BAD_ASHIFT:
5303 class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
5304 break;
5305 default:
5306 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
5309 (void) zfs_ereport_post(class, spa, vd, NULL, NULL,
5310 save_state);
5313 /* Erase any notion of persistent removed state */
5314 vd->vdev_removed = B_FALSE;
5315 } else {
5316 vd->vdev_removed = B_FALSE;
5320 * Notify ZED of any significant state-change on a leaf vdev.
5323 if (vd->vdev_ops->vdev_op_leaf) {
5324 /* preserve original state from a vdev_reopen() */
5325 if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
5326 (vd->vdev_prevstate != vd->vdev_state) &&
5327 (save_state <= VDEV_STATE_CLOSED))
5328 save_state = vd->vdev_prevstate;
5330 /* filter out state change due to initial vdev_open */
5331 if (save_state > VDEV_STATE_CLOSED)
5332 zfs_post_state_change(spa, vd, save_state);
5335 if (!isopen && vd->vdev_parent)
5336 vdev_propagate_state(vd->vdev_parent);
5339 boolean_t
5340 vdev_children_are_offline(vdev_t *vd)
5342 ASSERT(!vd->vdev_ops->vdev_op_leaf);
5344 for (uint64_t i = 0; i < vd->vdev_children; i++) {
5345 if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
5346 return (B_FALSE);
5349 return (B_TRUE);
5353 * Check the vdev configuration to ensure that it's capable of supporting
5354 * a root pool. We do not support partial configuration.
5356 boolean_t
5357 vdev_is_bootable(vdev_t *vd)
5359 if (!vd->vdev_ops->vdev_op_leaf) {
5360 const char *vdev_type = vd->vdev_ops->vdev_op_type;
5362 if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
5363 return (B_FALSE);
5366 for (int c = 0; c < vd->vdev_children; c++) {
5367 if (!vdev_is_bootable(vd->vdev_child[c]))
5368 return (B_FALSE);
5370 return (B_TRUE);
5373 boolean_t
5374 vdev_is_concrete(vdev_t *vd)
5376 vdev_ops_t *ops = vd->vdev_ops;
5377 if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
5378 ops == &vdev_missing_ops || ops == &vdev_root_ops) {
5379 return (B_FALSE);
5380 } else {
5381 return (B_TRUE);
5386 * Determine if a log device has valid content. If the vdev was
5387 * removed or faulted in the MOS config then we know that
5388 * the content on the log device has already been written to the pool.
5390 boolean_t
5391 vdev_log_state_valid(vdev_t *vd)
5393 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
5394 !vd->vdev_removed)
5395 return (B_TRUE);
5397 for (int c = 0; c < vd->vdev_children; c++)
5398 if (vdev_log_state_valid(vd->vdev_child[c]))
5399 return (B_TRUE);
5401 return (B_FALSE);
5405 * Expand a vdev if possible.
5407 void
5408 vdev_expand(vdev_t *vd, uint64_t txg)
5410 ASSERT(vd->vdev_top == vd);
5411 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5412 ASSERT(vdev_is_concrete(vd));
5414 vdev_set_deflate_ratio(vd);
5416 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
5417 vdev_is_concrete(vd)) {
5418 vdev_metaslab_group_create(vd);
5419 VERIFY(vdev_metaslab_init(vd, txg) == 0);
5420 vdev_config_dirty(vd);
5425 * Split a vdev.
5427 void
5428 vdev_split(vdev_t *vd)
5430 vdev_t *cvd, *pvd = vd->vdev_parent;
5432 VERIFY3U(pvd->vdev_children, >, 1);
5434 vdev_remove_child(pvd, vd);
5435 vdev_compact_children(pvd);
5437 ASSERT3P(pvd->vdev_child, !=, NULL);
5439 cvd = pvd->vdev_child[0];
5440 if (pvd->vdev_children == 1) {
5441 vdev_remove_parent(cvd);
5442 cvd->vdev_splitting = B_TRUE;
5444 vdev_propagate_state(cvd);
5447 void
5448 vdev_deadman(vdev_t *vd, const char *tag)
5450 for (int c = 0; c < vd->vdev_children; c++) {
5451 vdev_t *cvd = vd->vdev_child[c];
5453 vdev_deadman(cvd, tag);
5456 if (vd->vdev_ops->vdev_op_leaf) {
5457 vdev_queue_t *vq = &vd->vdev_queue;
5459 mutex_enter(&vq->vq_lock);
5460 if (avl_numnodes(&vq->vq_active_tree) > 0) {
5461 spa_t *spa = vd->vdev_spa;
5462 zio_t *fio;
5463 uint64_t delta;
5465 zfs_dbgmsg("slow vdev: %s has %lu active IOs",
5466 vd->vdev_path, avl_numnodes(&vq->vq_active_tree));
5469 * Look at the head of all the pending queues;
5470 * if any I/O has been outstanding for longer than
5471 * spa_deadman_synctime, invoke the deadman logic.
5473 fio = avl_first(&vq->vq_active_tree);
5474 delta = gethrtime() - fio->io_timestamp;
5475 if (delta > spa_deadman_synctime(spa))
5476 zio_deadman(fio, tag);
5478 mutex_exit(&vq->vq_lock);
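/*
 * Note that a resilver of this leaf vdev is being deferred; the flag is
 * acted on once the current scan completes (see SPA_FEATURE_RESILVER_DEFER).
 */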
5482 void
5483 vdev_defer_resilver(vdev_t *vd)
5485 ASSERT(vd->vdev_ops->vdev_op_leaf);
5487 vd->vdev_resilver_deferred = B_TRUE;
5488 vd->vdev_spa->spa_resilver_deferred = B_TRUE;
5492 * Clears the resilver deferred flag on all leaf devs under vd. Returns
5493 * B_TRUE if we have devices that need to be resilvered and are available to
5494 * accept resilver I/Os.
5496 boolean_t
5497 vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
5499 boolean_t resilver_needed = B_FALSE;
5500 spa_t *spa = vd->vdev_spa;
5502 for (int c = 0; c < vd->vdev_children; c++) {
5503 vdev_t *cvd = vd->vdev_child[c];
5504 resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
5507 if (vd == spa->spa_root_vdev &&
5508 spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
5509 spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
5510 vdev_config_dirty(vd);
5511 spa->spa_resilver_deferred = B_FALSE;
5512 return (resilver_needed);
5515 if (!vdev_is_concrete(vd) || vd->vdev_aux ||
5516 !vd->vdev_ops->vdev_op_leaf)
5517 return (resilver_needed);
5519 vd->vdev_resilver_deferred = B_FALSE;
5521 return (!vdev_is_dead(vd) && !vd->vdev_offline &&
5522 vdev_resilver_needed(vd, NULL, NULL));
5525 boolean_t
5526 vdev_xlate_is_empty(range_seg64_t *rs)
5528 return (rs->rs_start == rs->rs_end);
5532 * Translate a logical range to the first contiguous physical range for the
5533 * specified vdev_t. This function is initially called with a leaf vdev and
5534 * will walk each parent vdev until it reaches a top-level vdev. Once the
5535 * top-level is reached the physical range is initialized and the recursive
5536 * function begins to unwind. As it unwinds it calls the parent's vdev
5537 * specific translation function to do the real conversion.
5539 void
5540 vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
5541 range_seg64_t *physical_rs, range_seg64_t *remain_rs)
5544 * Walk up the vdev tree
5546 if (vd != vd->vdev_top) {
5547 vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
5548 remain_rs);
5549 } else {
5551 * We've reached the top-level vdev, initialize the physical
5552 * range to the logical range and set an empty remaining
5553 * range then start to unwind.
5555 physical_rs->rs_start = logical_rs->rs_start;
5556 physical_rs->rs_end = logical_rs->rs_end;
5558 remain_rs->rs_start = logical_rs->rs_start;
5559 remain_rs->rs_end = logical_rs->rs_start;
5561 return;
5564 vdev_t *pvd = vd->vdev_parent;
5565 ASSERT3P(pvd, !=, NULL);
5566 ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
5569 * As this recursive function unwinds, translate the logical
5570 * range into its physical and any remaining components by calling
5571 * the vdev specific translate function.
5573 range_seg64_t intermediate = { 0 };
5574 pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
5576 physical_rs->rs_start = intermediate.rs_start;
5577 physical_rs->rs_end = intermediate.rs_end;
5580 void
5581 vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
5582 vdev_xlate_func_t *func, void *arg)
5584 range_seg64_t iter_rs = *logical_rs;
5585 range_seg64_t physical_rs;
5586 range_seg64_t remain_rs;
5588 while (!vdev_xlate_is_empty(&iter_rs)) {
5590 vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
5593 * With raidz and dRAID, it's possible that the logical range
5594 * does not live on this leaf vdev. Call the provided function
5595 * only when the translated physical range is non-empty.
5597 if (!vdev_xlate_is_empty(&physical_rs))
5598 func(arg, &physical_rs);
5600 iter_rs = remain_rs;
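/*
 * An illustrative sketch of a walk callback (the helper name
 * xlate_add_cb is hypothetical): accumulate each translated physical
 * range into a caller-supplied range tree.
 *
 *	static void
 *	xlate_add_cb(void *arg, range_seg64_t *physical_rs)
 *	{
 *		range_tree_t *rt = arg;
 *		range_tree_add(rt, physical_rs->rs_start,
 *		    physical_rs->rs_end - physical_rs->rs_start);
 *	}
 *
 *	vdev_xlate_walk(vd, &logical_rs, xlate_add_cb, rt);
 */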
5604 static char *
5605 vdev_name(vdev_t *vd, char *buf, int buflen)
5607 if (vd->vdev_path == NULL) {
5608 if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
5609 strlcpy(buf, vd->vdev_spa->spa_name, buflen);
5610 } else if (!vd->vdev_ops->vdev_op_leaf) {
5611 snprintf(buf, buflen, "%s-%llu",
5612 vd->vdev_ops->vdev_op_type,
5613 (u_longlong_t)vd->vdev_id);
5615 } else {
5616 strlcpy(buf, vd->vdev_path, buflen);
5618 return (buf);
5622 * Look at the vdev tree and determine whether any devices are currently being
5623 * replaced.
5625 boolean_t
5626 vdev_replace_in_progress(vdev_t *vdev)
5628 ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
5630 if (vdev->vdev_ops == &vdev_replacing_ops)
5631 return (B_TRUE);
5634 * A 'spare' vdev indicates that we have a replace in progress, unless
5635 * it has exactly two children, and the second, the hot spare, has
5636 * finished being resilvered.
5638 if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
5639 !vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
5640 return (B_TRUE);
5642 for (int i = 0; i < vdev->vdev_children; i++) {
5643 if (vdev_replace_in_progress(vdev->vdev_child[i]))
5644 return (B_TRUE);
5647 return (B_FALSE);
5651 * Add a (source=src, propname=propval) list to an nvlist.
5653 static void
5654 vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval,
5655 uint64_t intval, zprop_source_t src)
5657 nvlist_t *propval;
5659 propval = fnvlist_alloc();
5660 fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
5662 if (strval != NULL)
5663 fnvlist_add_string(propval, ZPROP_VALUE, strval);
5664 else
5665 fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
5667 fnvlist_add_nvlist(nvl, propname, propval);
5668 nvlist_free(propval);
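/*
 * Sync task for vdev_prop_set(): persist each property into the vdev's
 * root/top/leaf ZAP object in the MOS and log the change to pool history.
 */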
5671 static void
5672 vdev_props_set_sync(void *arg, dmu_tx_t *tx)
5674 vdev_t *vd;
5675 nvlist_t *nvp = arg;
5676 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5677 objset_t *mos = spa->spa_meta_objset;
5678 nvpair_t *elem = NULL;
5679 uint64_t vdev_guid;
5680 nvlist_t *nvprops;
5682 vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
5683 nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
5684 vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
5686 /* this vdev could get removed while waiting for this sync task */
5687 if (vd == NULL)
5688 return;
5690 mutex_enter(&spa->spa_props_lock);
5692 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5693 uint64_t intval, objid = 0;
5694 const char *strval;
5695 vdev_prop_t prop;
5696 const char *propname = nvpair_name(elem);
5697 zprop_type_t proptype;
5700 * Set vdev property values in the vdev props mos object.
5702 if (vd->vdev_root_zap != 0) {
5703 objid = vd->vdev_root_zap;
5704 } else if (vd->vdev_top_zap != 0) {
5705 objid = vd->vdev_top_zap;
5706 } else if (vd->vdev_leaf_zap != 0) {
5707 objid = vd->vdev_leaf_zap;
5708 } else {
5710 * XXX: implement vdev_props_set_check()
5712 panic("vdev not root/top/leaf");
5715 switch (prop = vdev_name_to_prop(propname)) {
5716 case VDEV_PROP_USERPROP:
5717 if (vdev_prop_user(propname)) {
5718 strval = fnvpair_value_string(elem);
5719 if (strlen(strval) == 0) {
5720 /* remove the property if value == "" */
5721 (void) zap_remove(mos, objid, propname,
5722 tx);
5723 } else {
5724 VERIFY0(zap_update(mos, objid, propname,
5725 1, strlen(strval) + 1, strval, tx));
5727 spa_history_log_internal(spa, "vdev set", tx,
5728 "vdev_guid=%llu: %s=%s",
5729 (u_longlong_t)vdev_guid, nvpair_name(elem),
5730 strval);
5732 break;
5733 default:
5734 /* normalize the property name */
5735 propname = vdev_prop_to_name(prop);
5736 proptype = vdev_prop_get_type(prop);
5738 if (nvpair_type(elem) == DATA_TYPE_STRING) {
5739 ASSERT(proptype == PROP_TYPE_STRING);
5740 strval = fnvpair_value_string(elem);
5741 VERIFY0(zap_update(mos, objid, propname,
5742 1, strlen(strval) + 1, strval, tx));
5743 spa_history_log_internal(spa, "vdev set", tx,
5744 "vdev_guid=%llu: %s=%s",
5745 (u_longlong_t)vdev_guid, nvpair_name(elem),
5746 strval);
5747 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
5748 intval = fnvpair_value_uint64(elem);
5750 if (proptype == PROP_TYPE_INDEX) {
5751 const char *unused;
5752 VERIFY0(vdev_prop_index_to_string(
5753 prop, intval, &unused));
5755 VERIFY0(zap_update(mos, objid, propname,
5756 sizeof (uint64_t), 1, &intval, tx));
5757 spa_history_log_internal(spa, "vdev set", tx,
5758 "vdev_guid=%llu: %s=%lld",
5759 (u_longlong_t)vdev_guid,
5760 nvpair_name(elem), (longlong_t)intval);
5761 } else {
5762 panic("invalid vdev property type %u",
5763 nvpair_type(elem));
5769 mutex_exit(&spa->spa_props_lock);
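/*
 * Set vdev properties. 'innvl' supplies the target vdev guid
 * (ZPOOL_VDEV_PROPS_SET_VDEV) and an nvlist of properties
 * (ZPOOL_VDEV_PROPS_SET_PROPS). Properties needing special handling are
 * applied here; the rest are persisted by the vdev_props_set_sync() task.
 */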
5772 int
5773 vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
5775 spa_t *spa = vd->vdev_spa;
5776 nvpair_t *elem = NULL;
5777 uint64_t vdev_guid;
5778 nvlist_t *nvprops;
5779 int error = 0;
5781 ASSERT(vd != NULL);
5783 if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
5784 &vdev_guid) != 0)
5785 return (SET_ERROR(EINVAL));
5787 if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
5788 &nvprops) != 0)
5789 return (SET_ERROR(EINVAL));
5791 if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
5792 return (SET_ERROR(EINVAL));
5794 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5795 const char *propname = nvpair_name(elem);
5796 vdev_prop_t prop = vdev_name_to_prop(propname);
5797 uint64_t intval = 0;
5798 const char *strval = NULL;
5800 if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
5801 error = EINVAL;
5802 goto end;
5805 if (vdev_prop_readonly(prop)) {
5806 error = EROFS;
5807 goto end;
5810 /* Special Processing */
5811 switch (prop) {
5812 case VDEV_PROP_PATH:
5813 if (vd->vdev_path == NULL) {
5814 error = EROFS;
5815 break;
5817 if (nvpair_value_string(elem, &strval) != 0) {
5818 error = EINVAL;
5819 break;
5821 /* New path must start with /dev/ */
5822 if (strncmp(strval, "/dev/", 5)) {
5823 error = EINVAL;
5824 break;
5826 error = spa_vdev_setpath(spa, vdev_guid, strval);
5827 break;
5828 case VDEV_PROP_ALLOCATING:
5829 if (nvpair_value_uint64(elem, &intval) != 0) {
5830 error = EINVAL;
5831 break;
5833 if (intval != vd->vdev_noalloc)
5834 break;
5835 if (intval == 0)
5836 error = spa_vdev_noalloc(spa, vdev_guid);
5837 else
5838 error = spa_vdev_alloc(spa, vdev_guid);
5839 break;
5840 case VDEV_PROP_FAILFAST:
5841 if (nvpair_value_uint64(elem, &intval) != 0) {
5842 error = EINVAL;
5843 break;
5845 vd->vdev_failfast = intval & 1;
5846 break;
5847 case VDEV_PROP_CHECKSUM_N:
5848 if (nvpair_value_uint64(elem, &intval) != 0) {
5849 error = EINVAL;
5850 break;
5852 vd->vdev_checksum_n = intval;
5853 break;
5854 case VDEV_PROP_CHECKSUM_T:
5855 if (nvpair_value_uint64(elem, &intval) != 0) {
5856 error = EINVAL;
5857 break;
5859 vd->vdev_checksum_t = intval;
5860 break;
5861 case VDEV_PROP_IO_N:
5862 if (nvpair_value_uint64(elem, &intval) != 0) {
5863 error = EINVAL;
5864 break;
5866 vd->vdev_io_n = intval;
5867 break;
5868 case VDEV_PROP_IO_T:
5869 if (nvpair_value_uint64(elem, &intval) != 0) {
5870 error = EINVAL;
5871 break;
5873 vd->vdev_io_t = intval;
5874 break;
5875 default:
5876 /* Most processing is done in vdev_props_set_sync */
5877 break;
5879 end:
5880 if (error != 0) {
5881 intval = error;
5882 vdev_prop_add_list(outnvl, propname, strval, intval, 0);
5883 return (error);
5887 return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
5888 innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
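/*
 * Get vdev properties. Special read-only values are computed from the
 * in-core vdev; everything else is read from the vdev's root/top/leaf
 * ZAP object in the MOS.
 */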
5891 int
5892 vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
5894 spa_t *spa = vd->vdev_spa;
5895 objset_t *mos = spa->spa_meta_objset;
5896 int err = 0;
5897 uint64_t objid;
5898 uint64_t vdev_guid;
5899 nvpair_t *elem = NULL;
5900 nvlist_t *nvprops = NULL;
5901 uint64_t intval = 0;
5902 char *strval = NULL;
5903 const char *propname = NULL;
5904 vdev_prop_t prop;
5906 ASSERT(vd != NULL);
5907 ASSERT(mos != NULL);
5909 if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
5910 &vdev_guid) != 0)
5911 return (SET_ERROR(EINVAL));
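/* The props list is optional; when absent, all properties are returned. */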
5913 nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
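/* Pick the ZAP holding this vdev's properties: root, top-level, or leaf. */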
5915 if (vd->vdev_root_zap != 0) {
5916 objid = vd->vdev_root_zap;
5917 } else if (vd->vdev_top_zap != 0) {
5918 objid = vd->vdev_top_zap;
5919 } else if (vd->vdev_leaf_zap != 0) {
5920 objid = vd->vdev_leaf_zap;
5921 } else {
5922 return (SET_ERROR(EINVAL));
5923 }
5924 ASSERT(objid != 0);
5926 mutex_enter(&spa->spa_props_lock);
5928 if (nvprops != NULL) {
5929 char namebuf[64] = { 0 };
5931 while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
5932 intval = 0;
5933 strval = NULL;
5934 propname = nvpair_name(elem);
5935 prop = vdev_name_to_prop(propname);
5936 zprop_source_t src = ZPROP_SRC_DEFAULT;
5937 uint64_t integer_size, num_integers;
5939 switch (prop) {
5940 /* Special Read-only Properties */
5941 case VDEV_PROP_NAME:
5942 strval = vdev_name(vd, namebuf,
5943 sizeof (namebuf));
5944 if (strval == NULL)
5945 continue;
5946 vdev_prop_add_list(outnvl, propname, strval, 0,
5947 ZPROP_SRC_NONE);
5948 continue;
5949 case VDEV_PROP_CAPACITY:
5950 /* percent used */
5951 intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
5952 (vd->vdev_stat.vs_alloc * 100 /
5953 vd->vdev_stat.vs_dspace);
5954 vdev_prop_add_list(outnvl, propname, NULL,
5955 intval, ZPROP_SRC_NONE);
5956 continue;
5957 case VDEV_PROP_STATE:
5958 vdev_prop_add_list(outnvl, propname, NULL,
5959 vd->vdev_state, ZPROP_SRC_NONE);
5960 continue;
5961 case VDEV_PROP_GUID:
5962 vdev_prop_add_list(outnvl, propname, NULL,
5963 vd->vdev_guid, ZPROP_SRC_NONE);
5964 continue;
5965 case VDEV_PROP_ASIZE:
5966 vdev_prop_add_list(outnvl, propname, NULL,
5967 vd->vdev_asize, ZPROP_SRC_NONE);
5968 continue;
5969 case VDEV_PROP_PSIZE:
5970 vdev_prop_add_list(outnvl, propname, NULL,
5971 vd->vdev_psize, ZPROP_SRC_NONE);
5972 continue;
5973 case VDEV_PROP_ASHIFT:
5974 vdev_prop_add_list(outnvl, propname, NULL,
5975 vd->vdev_ashift, ZPROP_SRC_NONE);
5976 continue;
5977 case VDEV_PROP_SIZE:
5978 vdev_prop_add_list(outnvl, propname, NULL,
5979 vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
5980 continue;
5981 case VDEV_PROP_FREE:
5982 vdev_prop_add_list(outnvl, propname, NULL,
5983 vd->vdev_stat.vs_dspace -
5984 vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
5985 continue;
5986 case VDEV_PROP_ALLOCATED:
5987 vdev_prop_add_list(outnvl, propname, NULL,
5988 vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
5989 continue;
5990 case VDEV_PROP_EXPANDSZ:
5991 vdev_prop_add_list(outnvl, propname, NULL,
5992 vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
5993 continue;
5994 case VDEV_PROP_FRAGMENTATION:
5995 vdev_prop_add_list(outnvl, propname, NULL,
5996 vd->vdev_stat.vs_fragmentation,
5997 ZPROP_SRC_NONE);
5998 continue;
5999 case VDEV_PROP_PARITY:
6000 vdev_prop_add_list(outnvl, propname, NULL,
6001 vdev_get_nparity(vd), ZPROP_SRC_NONE);
6002 continue;
6003 case VDEV_PROP_PATH:
6004 if (vd->vdev_path == NULL)
6005 continue;
6006 vdev_prop_add_list(outnvl, propname,
6007 vd->vdev_path, 0, ZPROP_SRC_NONE);
6008 continue;
6009 case VDEV_PROP_DEVID:
6010 if (vd->vdev_devid == NULL)
6011 continue;
6012 vdev_prop_add_list(outnvl, propname,
6013 vd->vdev_devid, 0, ZPROP_SRC_NONE);
6014 continue;
6015 case VDEV_PROP_PHYS_PATH:
6016 if (vd->vdev_physpath == NULL)
6017 continue;
6018 vdev_prop_add_list(outnvl, propname,
6019 vd->vdev_physpath, 0, ZPROP_SRC_NONE);
6020 continue;
6021 case VDEV_PROP_ENC_PATH:
6022 if (vd->vdev_enc_sysfs_path == NULL)
6023 continue;
6024 vdev_prop_add_list(outnvl, propname,
6025 vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
6026 continue;
6027 case VDEV_PROP_FRU:
6028 if (vd->vdev_fru == NULL)
6029 continue;
6030 vdev_prop_add_list(outnvl, propname,
6031 vd->vdev_fru, 0, ZPROP_SRC_NONE);
6032 continue;
6033 case VDEV_PROP_PARENT:
6034 if (vd->vdev_parent != NULL) {
6035 strval = vdev_name(vd->vdev_parent,
6036 namebuf, sizeof (namebuf));
6037 vdev_prop_add_list(outnvl, propname,
6038 strval, 0, ZPROP_SRC_NONE);
6039 }
6040 continue;
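/* Build a comma-separated list of the child vdev names. */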
6041 case VDEV_PROP_CHILDREN:
6042 if (vd->vdev_children > 0)
6043 strval = kmem_zalloc(ZAP_MAXVALUELEN,
6044 KM_SLEEP);
6045 for (uint64_t i = 0; i < vd->vdev_children;
6046 i++) {
6047 const char *vname;
6049 vname = vdev_name(vd->vdev_child[i],
6050 namebuf, sizeof (namebuf));
6051 if (vname == NULL)
6052 vname = "(unknown)";
6053 if (strlen(strval) > 0)
6054 strlcat(strval, ",",
6055 ZAP_MAXVALUELEN);
6056 strlcat(strval, vname, ZAP_MAXVALUELEN);
6057 }
6058 if (strval != NULL) {
6059 vdev_prop_add_list(outnvl, propname,
6060 strval, 0, ZPROP_SRC_NONE);
6061 kmem_free(strval, ZAP_MAXVALUELEN);
6062 }
6063 continue;
6064 case VDEV_PROP_NUMCHILDREN:
6065 vdev_prop_add_list(outnvl, propname, NULL,
6066 vd->vdev_children, ZPROP_SRC_NONE);
6067 continue;
6068 case VDEV_PROP_READ_ERRORS:
6069 vdev_prop_add_list(outnvl, propname, NULL,
6070 vd->vdev_stat.vs_read_errors,
6071 ZPROP_SRC_NONE);
6072 continue;
6073 case VDEV_PROP_WRITE_ERRORS:
6074 vdev_prop_add_list(outnvl, propname, NULL,
6075 vd->vdev_stat.vs_write_errors,
6076 ZPROP_SRC_NONE);
6077 continue;
6078 case VDEV_PROP_CHECKSUM_ERRORS:
6079 vdev_prop_add_list(outnvl, propname, NULL,
6080 vd->vdev_stat.vs_checksum_errors,
6081 ZPROP_SRC_NONE);
6082 continue;
6083 case VDEV_PROP_INITIALIZE_ERRORS:
6084 vdev_prop_add_list(outnvl, propname, NULL,
6085 vd->vdev_stat.vs_initialize_errors,
6086 ZPROP_SRC_NONE);
6087 continue;
6088 case VDEV_PROP_OPS_NULL:
6089 vdev_prop_add_list(outnvl, propname, NULL,
6090 vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
6091 ZPROP_SRC_NONE);
6092 continue;
6093 case VDEV_PROP_OPS_READ:
6094 vdev_prop_add_list(outnvl, propname, NULL,
6095 vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
6096 ZPROP_SRC_NONE);
6097 continue;
6098 case VDEV_PROP_OPS_WRITE:
6099 vdev_prop_add_list(outnvl, propname, NULL,
6100 vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
6101 ZPROP_SRC_NONE);
6102 continue;
6103 case VDEV_PROP_OPS_FREE:
6104 vdev_prop_add_list(outnvl, propname, NULL,
6105 vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
6106 ZPROP_SRC_NONE);
6107 continue;
6108 case VDEV_PROP_OPS_CLAIM:
6109 vdev_prop_add_list(outnvl, propname, NULL,
6110 vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
6111 ZPROP_SRC_NONE);
6112 continue;
6113 case VDEV_PROP_OPS_TRIM:
6114 /*
6115 * TRIM ops and bytes are reported to user
6116 * space as ZIO_TYPE_IOCTL. This is done to
6117 * preserve the vdev_stat_t structure layout
6118 * for user space.
6119 */
6120 vdev_prop_add_list(outnvl, propname, NULL,
6121 vd->vdev_stat.vs_ops[ZIO_TYPE_IOCTL],
6122 ZPROP_SRC_NONE);
6123 continue;
6124 case VDEV_PROP_BYTES_NULL:
6125 vdev_prop_add_list(outnvl, propname, NULL,
6126 vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
6127 ZPROP_SRC_NONE);
6128 continue;
6129 case VDEV_PROP_BYTES_READ:
6130 vdev_prop_add_list(outnvl, propname, NULL,
6131 vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
6132 ZPROP_SRC_NONE);
6133 continue;
6134 case VDEV_PROP_BYTES_WRITE:
6135 vdev_prop_add_list(outnvl, propname, NULL,
6136 vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
6137 ZPROP_SRC_NONE);
6138 continue;
6139 case VDEV_PROP_BYTES_FREE:
6140 vdev_prop_add_list(outnvl, propname, NULL,
6141 vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
6142 ZPROP_SRC_NONE);
6143 continue;
6144 case VDEV_PROP_BYTES_CLAIM:
6145 vdev_prop_add_list(outnvl, propname, NULL,
6146 vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
6147 ZPROP_SRC_NONE);
6148 continue;
6149 case VDEV_PROP_BYTES_TRIM:
6150 /*
6151 * TRIM ops and bytes are reported to user
6152 * space as ZIO_TYPE_IOCTL. This is done to
6153 * preserve the vdev_stat_t structure layout
6154 * for user space.
6155 */
6156 vdev_prop_add_list(outnvl, propname, NULL,
6157 vd->vdev_stat.vs_bytes[ZIO_TYPE_IOCTL],
6158 ZPROP_SRC_NONE);
6159 continue;
6160 case VDEV_PROP_REMOVING:
6161 vdev_prop_add_list(outnvl, propname, NULL,
6162 vd->vdev_removing, ZPROP_SRC_NONE);
6163 continue;
6164 /* Numeric Properties */
6165 case VDEV_PROP_ALLOCATING:
6166 /* Leaf vdevs cannot have this property */
6167 if (vd->vdev_mg == NULL &&
6168 vd->vdev_top != NULL) {
6169 src = ZPROP_SRC_NONE;
6170 intval = ZPROP_BOOLEAN_NA;
6171 } else {
6172 err = vdev_prop_get_int(vd, prop,
6173 &intval);
6174 if (err && err != ENOENT)
6175 break;
6177 if (intval ==
6178 vdev_prop_default_numeric(prop))
6179 src = ZPROP_SRC_DEFAULT;
6180 else
6181 src = ZPROP_SRC_LOCAL;
6182 }
6184 vdev_prop_add_list(outnvl, propname, NULL,
6185 intval, src);
6186 break;
6187 case VDEV_PROP_FAILFAST:
6188 src = ZPROP_SRC_LOCAL;
6189 strval = NULL;
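/* Read the ZAP directly, falling back to the default when unset. */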
6191 err = zap_lookup(mos, objid, nvpair_name(elem),
6192 sizeof (uint64_t), 1, &intval);
6193 if (err == ENOENT) {
6194 intval = vdev_prop_default_numeric(
6195 prop);
6196 err = 0;
6197 } else if (err) {
6198 break;
6199 }
6200 if (intval == vdev_prop_default_numeric(prop))
6201 src = ZPROP_SRC_DEFAULT;
6203 vdev_prop_add_list(outnvl, propname, strval,
6204 intval, src);
6205 break;
6206 case VDEV_PROP_CHECKSUM_N:
6207 case VDEV_PROP_CHECKSUM_T:
6208 case VDEV_PROP_IO_N:
6209 case VDEV_PROP_IO_T:
6210 err = vdev_prop_get_int(vd, prop, &intval);
6211 if (err && err != ENOENT)
6212 break;
6214 if (intval == vdev_prop_default_numeric(prop))
6215 src = ZPROP_SRC_DEFAULT;
6216 else
6217 src = ZPROP_SRC_LOCAL;
6219 vdev_prop_add_list(outnvl, propname, NULL,
6220 intval, src);
6221 break;
6222 /* Text Properties */
6223 case VDEV_PROP_COMMENT:
6224 /* Exists in the ZAP below */
6225 /* FALLTHRU */
6226 case VDEV_PROP_USERPROP:
6227 /* User Properties */
6228 src = ZPROP_SRC_LOCAL;
6230 err = zap_length(mos, objid, nvpair_name(elem),
6231 &integer_size, &num_integers);
6232 if (err)
6233 break;
6235 switch (integer_size) {
6236 case 8:
6237 /* User properties cannot be integers */
6238 err = EINVAL;
6239 break;
6240 case 1:
6241 /* string property */
6242 strval = kmem_alloc(num_integers,
6243 KM_SLEEP);
6244 err = zap_lookup(mos, objid,
6245 nvpair_name(elem), 1,
6246 num_integers, strval);
6247 if (err) {
6248 kmem_free(strval,
6249 num_integers);
6250 break;
6251 }
6252 vdev_prop_add_list(outnvl, propname,
6253 strval, 0, src);
6254 kmem_free(strval, num_integers);
6255 break;
6256 }
6257 break;
6258 default:
6259 err = ENOENT;
6260 break;
6261 }
6262 if (err)
6263 break;
6264 }
6265 } else {
6266 /*
6267 * Get all properties from the MOS vdev property object.
6268 */
6269 zap_cursor_t zc;
6270 zap_attribute_t za;
6271 for (zap_cursor_init(&zc, mos, objid);
6272 (err = zap_cursor_retrieve(&zc, &za)) == 0;
6273 zap_cursor_advance(&zc)) {
6274 intval = 0;
6275 strval = NULL;
6276 zprop_source_t src = ZPROP_SRC_DEFAULT;
6277 propname = za.za_name;
6279 switch (za.za_integer_length) {
6280 case 8:
6281 /* We do not allow integer user properties */
6282 /* This is likely an internal value */
6283 break;
6284 case 1:
6285 /* string property */
6286 strval = kmem_alloc(za.za_num_integers,
6287 KM_SLEEP);
6288 err = zap_lookup(mos, objid, za.za_name, 1,
6289 za.za_num_integers, strval);
6290 if (err) {
6291 kmem_free(strval, za.za_num_integers);
6292 break;
6293 }
6294 vdev_prop_add_list(outnvl, propname, strval, 0,
6295 src);
6296 kmem_free(strval, za.za_num_integers);
6297 break;
6299 default:
6300 break;
6301 }
6302 }
6303 zap_cursor_fini(&zc);
6304 }
6306 mutex_exit(&spa->spa_props_lock);
6307 if (err && err != ENOENT) {
6308 return (err);
6309 }
6311 return (0);
6312 }
6314 EXPORT_SYMBOL(vdev_fault);
6315 EXPORT_SYMBOL(vdev_degrade);
6316 EXPORT_SYMBOL(vdev_online);
6317 EXPORT_SYMBOL(vdev_offline);
6318 EXPORT_SYMBOL(vdev_clear);
6320 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
6321 "Target number of metaslabs per top-level vdev");
6323 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
6324 "Default lower limit for metaslab size");
6326 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW,
6327 "Default upper limit for metaslab size");
6329 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
6330 "Minimum number of metaslabs per top-level vdev");
6332 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
6333 "Practical upper limit of total metaslabs per top-level vdev");
6335 ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
6336 "Rate limit slow IO (delay) events to this many per second");
6338 /* BEGIN CSTYLED */
6339 ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
6340 "Rate limit checksum events to this many checksum errors per second "
6341 "(do not set below ZED threshold).");
6342 /* END CSTYLED */
6344 ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
6345 "Ignore errors during resilver/scrub");
6347 ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
6348 "Bypass vdev_validate()");
6350 ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
6351 "Disable cache flushes");
6353 ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
6354 "Minimum number of metaslabs required to dedicate one for log blocks");
6356 /* BEGIN CSTYLED */
6357 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
6358 param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
6359 "Minimum ashift used when creating new top-level vdevs");
6361 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
6362 param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
6363 "Maximum ashift used when optimizing for logical -> physical sector "
6364 "size on new top-level vdevs");
6365 /* END CSTYLED */