/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2023, 2024, Klara Inc.
 */
#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/kstat.h>
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/zstd/zstd.h>
/*
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy
 *		- Held at the start and end of import and export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *	spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *	spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *	spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */
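/*
 * Illustrative sketch (not part of the original code): the ordering
 * described above, as it would appear in a hypothetical caller that
 * looks up a pool named "tank" and then reads its configuration:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *
 *	if (spa != NULL) {
 *		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
 *		... read the vdev tree / pool configuration ...
 *		spa_config_exit(spa, SCL_CONFIG, FTAG);
 *		spa_close(spa, FTAG);
 *	}
 *
 * i.e. spa_namespace_lock, then spa_refcount, then spa_config_lock[],
 * never the other way around.
 */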
avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;

static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
	ZFS_DEBUG_INDIRECT_REMAP);
/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;
/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata cannot be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that cannot be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;
/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
uint64_t zfs_deadman_synctime_ms = 600000UL;	/* 10 min. */

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO.  By default this is 300 seconds at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
uint64_t zfs_deadman_ziotime_ms = 300000UL;	/* 5 min. */

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 60000UL;	/* 1 min. */

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = B_TRUE;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 *	wait     - Wait for the "hung" I/O (default)
 *	continue - Attempt to recover from a "hung" I/O
 *	panic    - Panic the system
 */
const char *zfs_deadman_failmode = "wait";
/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *
 *	(VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
uint_t spa_asize_inflation = 24;
/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS).  It also limits the worst-case time to allocate space.  If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
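/*
 * A brief worked example of the defaults above (an illustration, not
 * upstream text): with spa_slop_shift = 5 the reserve is space >> 5,
 * i.e. ~3.1%.  For a 10 TiB pool that is ~320 GiB, which spa_max_slop
 * then caps at 128 GiB.  For a 1 GiB pool, 1 GiB >> 5 = 32 MiB, which is
 * raised to spa_min_slop (128 MiB); only for pools smaller than 256 MiB
 * does the half-the-pool bound take over.  See spa_get_slop_space()
 * below for the exact computation, including the embedded-log
 * adjustment.
 */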
/*
 * Number of allocators to use, per spa instance
 */
static int spa_num_allocators = 4;
static int spa_cpus_per_allocator = 4;

/*
 * Spa active allocator.
 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>.
 */
const char *zfs_active_allocator = "dynamic";
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[1024];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[1024];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);

	spa_import_progress_set_notes_nolog(spa, "%s", buf);
}
/*
 * By default dedup and user data indirects land in the special class
 */
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
 * let metadata into the class.
 */
static uint_t zfs_special_class_metadata_reserve_pct = 25;
/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
		ASSERT(scl->scl_count == 0);
	}
}
int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (scl->scl_count != 0) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}
static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
    int mmp_flag)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer ||
			    (!mmp_flag && scl->scl_write_wanted)) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (scl->scl_count != 0) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}
void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 0);
}

/*
 * The spa_config_enter_mmp() allows the mmp thread to cut in front of
 * outstanding write lock requests.  This is needed since the mmp updates are
 * time sensitive and failure to service them promptly will result in a
 * suspended pool.  This pool suspension has been seen in practice when there is
 * a single disk in a pool that is responding slowly and presumably about to
 * fail.
 */
void
spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 1);
}
void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(scl->scl_count > 0);
		if (--scl->scl_count == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && scl->scl_count != 0) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
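/*
 * Usage sketch (an illustration, not part of the original code): a
 * caller that only needs to read the vdev tree takes the weakest lock
 * that protects it and releases it with the same tag:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	vdev_t *rvd = spa->spa_root_vdev;
 *	... trivial inquiries against the tree ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * Because ownership can be handed off (see the comment at the top of
 * this file), the thread calling spa_config_exit() need not be the one
 * that called spa_config_enter(); SCL_ZIO is released from interrupt
 * context this way.
 */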
/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */
/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

retry:
	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);
	if (spa == NULL)
		return (NULL);

	/*
	 * Avoid racing with import/export, which don't hold the namespace
	 * lock for their entire duration.
	 */
	if ((spa->spa_load_thread != NULL &&
	    spa->spa_load_thread != curthread) ||
	    (spa->spa_export_thread != NULL &&
	    spa->spa_export_thread != curthread)) {
		cv_wait(&spa_namespace_cv, &spa_namespace_lock);
		goto retry;
	}

	return (spa);
}
/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}
689 * Create an uninitialized spa_t with the given name. Requires
690 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
691 * exist by calling spa_lookup() first.
694 spa_add(const char *name
, nvlist_t
*config
, const char *altroot
)
697 spa_config_dirent_t
*dp
;
699 ASSERT(MUTEX_HELD(&spa_namespace_lock
));
701 spa
= kmem_zalloc(sizeof (spa_t
), KM_SLEEP
);
703 mutex_init(&spa
->spa_async_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
704 mutex_init(&spa
->spa_errlist_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
705 mutex_init(&spa
->spa_errlog_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
706 mutex_init(&spa
->spa_evicting_os_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
707 mutex_init(&spa
->spa_history_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
708 mutex_init(&spa
->spa_proc_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
709 mutex_init(&spa
->spa_props_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
710 mutex_init(&spa
->spa_cksum_tmpls_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
711 mutex_init(&spa
->spa_scrub_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
712 mutex_init(&spa
->spa_suspend_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
713 mutex_init(&spa
->spa_vdev_top_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
714 mutex_init(&spa
->spa_feat_stats_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
715 mutex_init(&spa
->spa_flushed_ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
716 mutex_init(&spa
->spa_activities_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
718 cv_init(&spa
->spa_async_cv
, NULL
, CV_DEFAULT
, NULL
);
719 cv_init(&spa
->spa_evicting_os_cv
, NULL
, CV_DEFAULT
, NULL
);
720 cv_init(&spa
->spa_proc_cv
, NULL
, CV_DEFAULT
, NULL
);
721 cv_init(&spa
->spa_scrub_io_cv
, NULL
, CV_DEFAULT
, NULL
);
722 cv_init(&spa
->spa_suspend_cv
, NULL
, CV_DEFAULT
, NULL
);
723 cv_init(&spa
->spa_activities_cv
, NULL
, CV_DEFAULT
, NULL
);
724 cv_init(&spa
->spa_waiters_cv
, NULL
, CV_DEFAULT
, NULL
);
726 for (int t
= 0; t
< TXG_SIZE
; t
++)
727 bplist_create(&spa
->spa_free_bplist
[t
]);
729 (void) strlcpy(spa
->spa_name
, name
, sizeof (spa
->spa_name
));
730 spa
->spa_state
= POOL_STATE_UNINITIALIZED
;
731 spa
->spa_freeze_txg
= UINT64_MAX
;
732 spa
->spa_final_txg
= UINT64_MAX
;
733 spa
->spa_load_max_txg
= UINT64_MAX
;
735 spa
->spa_proc_state
= SPA_PROC_NONE
;
736 spa
->spa_trust_config
= B_TRUE
;
737 spa
->spa_hostid
= zone_get_hostid(NULL
);
739 spa
->spa_deadman_synctime
= MSEC2NSEC(zfs_deadman_synctime_ms
);
740 spa
->spa_deadman_ziotime
= MSEC2NSEC(zfs_deadman_ziotime_ms
);
741 spa_set_deadman_failmode(spa
, zfs_deadman_failmode
);
742 spa_set_allocator(spa
, zfs_active_allocator
);
744 zfs_refcount_create(&spa
->spa_refcount
);
745 spa_config_lock_init(spa
);
748 ASSERT(MUTEX_HELD(&spa_namespace_lock
));
749 avl_add(&spa_namespace_avl
, spa
);
752 * Set the alternate root, if there is one.
755 spa
->spa_root
= spa_strdup(altroot
);
757 /* Do not allow more allocators than fraction of CPUs. */
758 spa
->spa_alloc_count
= MAX(MIN(spa_num_allocators
,
759 boot_ncpus
/ MAX(spa_cpus_per_allocator
, 1)), 1);
761 spa
->spa_allocs
= kmem_zalloc(spa
->spa_alloc_count
*
762 sizeof (spa_alloc_t
), KM_SLEEP
);
763 for (int i
= 0; i
< spa
->spa_alloc_count
; i
++) {
764 mutex_init(&spa
->spa_allocs
[i
].spaa_lock
, NULL
, MUTEX_DEFAULT
,
766 avl_create(&spa
->spa_allocs
[i
].spaa_tree
, zio_bookmark_compare
,
767 sizeof (zio_t
), offsetof(zio_t
, io_queue_node
.a
));
769 if (spa
->spa_alloc_count
> 1) {
770 spa
->spa_allocs_use
= kmem_zalloc(offsetof(spa_allocs_use_t
,
771 sau_inuse
[spa
->spa_alloc_count
]), KM_SLEEP
);
772 mutex_init(&spa
->spa_allocs_use
->sau_lock
, NULL
, MUTEX_DEFAULT
,
776 avl_create(&spa
->spa_metaslabs_by_flushed
, metaslab_sort_by_flushed
,
777 sizeof (metaslab_t
), offsetof(metaslab_t
, ms_spa_txg_node
));
778 avl_create(&spa
->spa_sm_logs_by_txg
, spa_log_sm_sort_by_txg
,
779 sizeof (spa_log_sm_t
), offsetof(spa_log_sm_t
, sls_node
));
780 list_create(&spa
->spa_log_summary
, sizeof (log_summary_entry_t
),
781 offsetof(log_summary_entry_t
, lse_node
));
784 * Every pool starts with the default cachefile
786 list_create(&spa
->spa_config_list
, sizeof (spa_config_dirent_t
),
787 offsetof(spa_config_dirent_t
, scd_link
));
789 dp
= kmem_zalloc(sizeof (spa_config_dirent_t
), KM_SLEEP
);
790 dp
->scd_path
= altroot
? NULL
: spa_strdup(spa_config_path
);
791 list_insert_head(&spa
->spa_config_list
, dp
);
793 VERIFY(nvlist_alloc(&spa
->spa_load_info
, NV_UNIQUE_NAME
,
796 if (config
!= NULL
) {
799 if (nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_FEATURES_FOR_READ
,
801 VERIFY(nvlist_dup(features
, &spa
->spa_label_features
,
805 VERIFY(nvlist_dup(config
, &spa
->spa_config
, 0) == 0);
808 if (spa
->spa_label_features
== NULL
) {
809 VERIFY(nvlist_alloc(&spa
->spa_label_features
, NV_UNIQUE_NAME
,
813 spa
->spa_min_ashift
= INT_MAX
;
814 spa
->spa_max_ashift
= 0;
815 spa
->spa_min_alloc
= INT_MAX
;
816 spa
->spa_gcd_alloc
= INT_MAX
;
818 /* Reset cached value */
819 spa
->spa_dedup_dspace
= ~0ULL;
822 * As a pool is being created, treat all features as disabled by
823 * setting SPA_FEATURE_DISABLED for all entries in the feature
826 for (int i
= 0; i
< SPA_FEATURES
; i
++) {
827 spa
->spa_feat_refcount_cache
[i
] = SPA_FEATURE_DISABLED
;
830 list_create(&spa
->spa_leaf_list
, sizeof (vdev_t
),
831 offsetof(vdev_t
, vdev_leaf_node
));
837 * Removes a spa_t from the namespace, freeing up any memory used. Requires
838 * spa_namespace_lock. This is called only after the spa_t has been closed and
842 spa_remove(spa_t
*spa
)
844 spa_config_dirent_t
*dp
;
846 ASSERT(MUTEX_HELD(&spa_namespace_lock
));
847 ASSERT(spa_state(spa
) == POOL_STATE_UNINITIALIZED
);
848 ASSERT3U(zfs_refcount_count(&spa
->spa_refcount
), ==, 0);
849 ASSERT0(spa
->spa_waiters
);
851 nvlist_free(spa
->spa_config_splitting
);
853 avl_remove(&spa_namespace_avl
, spa
);
856 spa_strfree(spa
->spa_root
);
858 while ((dp
= list_remove_head(&spa
->spa_config_list
)) != NULL
) {
859 if (dp
->scd_path
!= NULL
)
860 spa_strfree(dp
->scd_path
);
861 kmem_free(dp
, sizeof (spa_config_dirent_t
));
864 for (int i
= 0; i
< spa
->spa_alloc_count
; i
++) {
865 avl_destroy(&spa
->spa_allocs
[i
].spaa_tree
);
866 mutex_destroy(&spa
->spa_allocs
[i
].spaa_lock
);
868 kmem_free(spa
->spa_allocs
, spa
->spa_alloc_count
*
869 sizeof (spa_alloc_t
));
870 if (spa
->spa_alloc_count
> 1) {
871 mutex_destroy(&spa
->spa_allocs_use
->sau_lock
);
872 kmem_free(spa
->spa_allocs_use
, offsetof(spa_allocs_use_t
,
873 sau_inuse
[spa
->spa_alloc_count
]));
876 avl_destroy(&spa
->spa_metaslabs_by_flushed
);
877 avl_destroy(&spa
->spa_sm_logs_by_txg
);
878 list_destroy(&spa
->spa_log_summary
);
879 list_destroy(&spa
->spa_config_list
);
880 list_destroy(&spa
->spa_leaf_list
);
882 nvlist_free(spa
->spa_label_features
);
883 nvlist_free(spa
->spa_load_info
);
884 nvlist_free(spa
->spa_feat_stats
);
885 spa_config_set(spa
, NULL
);
887 zfs_refcount_destroy(&spa
->spa_refcount
);
889 spa_stats_destroy(spa
);
890 spa_config_lock_destroy(spa
);
892 for (int t
= 0; t
< TXG_SIZE
; t
++)
893 bplist_destroy(&spa
->spa_free_bplist
[t
]);
895 zio_checksum_templates_free(spa
);
897 cv_destroy(&spa
->spa_async_cv
);
898 cv_destroy(&spa
->spa_evicting_os_cv
);
899 cv_destroy(&spa
->spa_proc_cv
);
900 cv_destroy(&spa
->spa_scrub_io_cv
);
901 cv_destroy(&spa
->spa_suspend_cv
);
902 cv_destroy(&spa
->spa_activities_cv
);
903 cv_destroy(&spa
->spa_waiters_cv
);
905 mutex_destroy(&spa
->spa_flushed_ms_lock
);
906 mutex_destroy(&spa
->spa_async_lock
);
907 mutex_destroy(&spa
->spa_errlist_lock
);
908 mutex_destroy(&spa
->spa_errlog_lock
);
909 mutex_destroy(&spa
->spa_evicting_os_lock
);
910 mutex_destroy(&spa
->spa_history_lock
);
911 mutex_destroy(&spa
->spa_proc_lock
);
912 mutex_destroy(&spa
->spa_props_lock
);
913 mutex_destroy(&spa
->spa_cksum_tmpls_lock
);
914 mutex_destroy(&spa
->spa_scrub_lock
);
915 mutex_destroy(&spa
->spa_suspend_lock
);
916 mutex_destroy(&spa
->spa_vdev_top_lock
);
917 mutex_destroy(&spa
->spa_feat_stats_lock
);
918 mutex_destroy(&spa
->spa_activities_lock
);
920 kmem_free(spa
, sizeof (spa_t
));
924 * Given a pool, return the next pool in the namespace, or NULL if there is
925 * none. If 'prev' is NULL, return the first pool.
928 spa_next(spa_t
*prev
)
930 ASSERT(MUTEX_HELD(&spa_namespace_lock
));
933 return (AVL_NEXT(&spa_namespace_avl
, prev
));
935 return (avl_first(&spa_namespace_avl
));
939 * ==========================================================================
940 * SPA refcount functions
941 * ==========================================================================
945 * Add a reference to the given spa_t. Must have at least one reference, or
946 * have the namespace lock held.
949 spa_open_ref(spa_t
*spa
, const void *tag
)
951 ASSERT(zfs_refcount_count(&spa
->spa_refcount
) >= spa
->spa_minref
||
952 MUTEX_HELD(&spa_namespace_lock
) ||
953 spa
->spa_load_thread
== curthread
);
954 (void) zfs_refcount_add(&spa
->spa_refcount
, tag
);
958 * Remove a reference to the given spa_t. Must have at least one reference, or
959 * have the namespace lock held or be part of a pool import/export.
962 spa_close(spa_t
*spa
, const void *tag
)
964 ASSERT(zfs_refcount_count(&spa
->spa_refcount
) > spa
->spa_minref
||
965 MUTEX_HELD(&spa_namespace_lock
) ||
966 spa
->spa_load_thread
== curthread
||
967 spa
->spa_export_thread
== curthread
);
968 (void) zfs_refcount_remove(&spa
->spa_refcount
, tag
);
972 * Remove a reference to the given spa_t held by a dsl dir that is
973 * being asynchronously released. Async releases occur from a taskq
974 * performing eviction of dsl datasets and dirs. The namespace lock
975 * isn't held and the hold by the object being evicted may contribute to
976 * spa_minref (e.g. dataset or directory released during pool export),
977 * so the asserts in spa_close() do not apply.
980 spa_async_close(spa_t
*spa
, const void *tag
)
982 (void) zfs_refcount_remove(&spa
->spa_refcount
, tag
);
986 * Check to see if the spa refcount is zero. Must be called with
987 * spa_namespace_lock held or be the spa export thread. We really
988 * compare against spa_minref, which is the number of references
989 * acquired when opening a pool
992 spa_refcount_zero(spa_t
*spa
)
994 ASSERT(MUTEX_HELD(&spa_namespace_lock
) ||
995 spa
->spa_export_thread
== curthread
);
997 return (zfs_refcount_count(&spa
->spa_refcount
) == spa
->spa_minref
);
1001 * ==========================================================================
1002 * SPA spare and l2cache tracking
1003 * ==========================================================================
1007 * Hot spares and cache devices are tracked using the same code below,
1008 * for 'auxiliary' devices.
1011 typedef struct spa_aux
{
1019 spa_aux_compare(const void *a
, const void *b
)
1021 const spa_aux_t
*sa
= (const spa_aux_t
*)a
;
1022 const spa_aux_t
*sb
= (const spa_aux_t
*)b
;
1024 return (TREE_CMP(sa
->aux_guid
, sb
->aux_guid
));
1028 spa_aux_add(vdev_t
*vd
, avl_tree_t
*avl
)
1034 search
.aux_guid
= vd
->vdev_guid
;
1035 if ((aux
= avl_find(avl
, &search
, &where
)) != NULL
) {
1038 aux
= kmem_zalloc(sizeof (spa_aux_t
), KM_SLEEP
);
1039 aux
->aux_guid
= vd
->vdev_guid
;
1041 avl_insert(avl
, aux
, where
);
1046 spa_aux_remove(vdev_t
*vd
, avl_tree_t
*avl
)
1052 search
.aux_guid
= vd
->vdev_guid
;
1053 aux
= avl_find(avl
, &search
, &where
);
1055 ASSERT(aux
!= NULL
);
1057 if (--aux
->aux_count
== 0) {
1058 avl_remove(avl
, aux
);
1059 kmem_free(aux
, sizeof (spa_aux_t
));
1060 } else if (aux
->aux_pool
== spa_guid(vd
->vdev_spa
)) {
1061 aux
->aux_pool
= 0ULL;
1066 spa_aux_exists(uint64_t guid
, uint64_t *pool
, int *refcnt
, avl_tree_t
*avl
)
1068 spa_aux_t search
, *found
;
1070 search
.aux_guid
= guid
;
1071 found
= avl_find(avl
, &search
, NULL
);
1075 *pool
= found
->aux_pool
;
1082 *refcnt
= found
->aux_count
;
1087 return (found
!= NULL
);
1091 spa_aux_activate(vdev_t
*vd
, avl_tree_t
*avl
)
1093 spa_aux_t search
, *found
;
1096 search
.aux_guid
= vd
->vdev_guid
;
1097 found
= avl_find(avl
, &search
, &where
);
1098 ASSERT(found
!= NULL
);
1099 ASSERT(found
->aux_pool
== 0ULL);
1101 found
->aux_pool
= spa_guid(vd
->vdev_spa
);
/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
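/*
 * Usage sketch (an illustration, not part of the original code):
 * answering "is this guid a known hot spare, and is it active in some
 * pool?" goes through the reference-counted AVL tree described above,
 * under spa_spare_lock:
 *
 *	uint64_t pool;
 *	int refcnt;
 *	if (spa_spare_exists(vd->vdev_guid, &pool, &refcnt)) {
 *		if (pool != 0ULL)
 *			... the spare is active in the pool with that guid ...
 *		if (refcnt > 1)
 *			... the device is shared by more than one pool ...
 *	}
 *
 * spa_spare_add() and spa_spare_remove() below maintain the tree as
 * vdevs gain and lose the spare label.
 */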
1127 spa_spare_compare(const void *a
, const void *b
)
1129 return (spa_aux_compare(a
, b
));
1133 spa_spare_add(vdev_t
*vd
)
1135 mutex_enter(&spa_spare_lock
);
1136 ASSERT(!vd
->vdev_isspare
);
1137 spa_aux_add(vd
, &spa_spare_avl
);
1138 vd
->vdev_isspare
= B_TRUE
;
1139 mutex_exit(&spa_spare_lock
);
1143 spa_spare_remove(vdev_t
*vd
)
1145 mutex_enter(&spa_spare_lock
);
1146 ASSERT(vd
->vdev_isspare
);
1147 spa_aux_remove(vd
, &spa_spare_avl
);
1148 vd
->vdev_isspare
= B_FALSE
;
1149 mutex_exit(&spa_spare_lock
);
1153 spa_spare_exists(uint64_t guid
, uint64_t *pool
, int *refcnt
)
1157 mutex_enter(&spa_spare_lock
);
1158 found
= spa_aux_exists(guid
, pool
, refcnt
, &spa_spare_avl
);
1159 mutex_exit(&spa_spare_lock
);
1165 spa_spare_activate(vdev_t
*vd
)
1167 mutex_enter(&spa_spare_lock
);
1168 ASSERT(vd
->vdev_isspare
);
1169 spa_aux_activate(vd
, &spa_spare_avl
);
1170 mutex_exit(&spa_spare_lock
);
1174 * Level 2 ARC devices are tracked globally for the same reasons as spares.
1175 * Cache devices currently only support one pool per cache device, and so
1176 * for these devices the aux reference count is currently unused beyond 1.
1180 spa_l2cache_compare(const void *a
, const void *b
)
1182 return (spa_aux_compare(a
, b
));
1186 spa_l2cache_add(vdev_t
*vd
)
1188 mutex_enter(&spa_l2cache_lock
);
1189 ASSERT(!vd
->vdev_isl2cache
);
1190 spa_aux_add(vd
, &spa_l2cache_avl
);
1191 vd
->vdev_isl2cache
= B_TRUE
;
1192 mutex_exit(&spa_l2cache_lock
);
1196 spa_l2cache_remove(vdev_t
*vd
)
1198 mutex_enter(&spa_l2cache_lock
);
1199 ASSERT(vd
->vdev_isl2cache
);
1200 spa_aux_remove(vd
, &spa_l2cache_avl
);
1201 vd
->vdev_isl2cache
= B_FALSE
;
1202 mutex_exit(&spa_l2cache_lock
);
1206 spa_l2cache_exists(uint64_t guid
, uint64_t *pool
)
1210 mutex_enter(&spa_l2cache_lock
);
1211 found
= spa_aux_exists(guid
, pool
, NULL
, &spa_l2cache_avl
);
1212 mutex_exit(&spa_l2cache_lock
);
1218 spa_l2cache_activate(vdev_t
*vd
)
1220 mutex_enter(&spa_l2cache_lock
);
1221 ASSERT(vd
->vdev_isl2cache
);
1222 spa_aux_activate(vd
, &spa_l2cache_avl
);
1223 mutex_exit(&spa_l2cache_lock
);
1227 * ==========================================================================
1229 * ==========================================================================
1233 * Lock the given spa_t for the purpose of adding or removing a vdev.
1234 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1235 * It returns the next transaction group for the spa_t.
1238 spa_vdev_enter(spa_t
*spa
)
1240 mutex_enter(&spa
->spa_vdev_top_lock
);
1241 mutex_enter(&spa_namespace_lock
);
1243 ASSERT0(spa
->spa_export_thread
);
1245 vdev_autotrim_stop_all(spa
);
1247 return (spa_vdev_config_enter(spa
));
1251 * The same as spa_vdev_enter() above but additionally takes the guid of
1252 * the vdev being detached. When there is a rebuild in process it will be
1253 * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
1254 * The rebuild is canceled if only a single child remains after the detach.
1257 spa_vdev_detach_enter(spa_t
*spa
, uint64_t guid
)
1259 mutex_enter(&spa
->spa_vdev_top_lock
);
1260 mutex_enter(&spa_namespace_lock
);
1262 ASSERT0(spa
->spa_export_thread
);
1264 vdev_autotrim_stop_all(spa
);
1267 vdev_t
*vd
= spa_lookup_by_guid(spa
, guid
, B_FALSE
);
1269 vdev_rebuild_stop_wait(vd
->vdev_top
);
1273 return (spa_vdev_config_enter(spa
));
1277 * Internal implementation for spa_vdev_enter(). Used when a vdev
1278 * operation requires multiple syncs (i.e. removing a device) while
1279 * keeping the spa_namespace_lock held.
1282 spa_vdev_config_enter(spa_t
*spa
)
1284 ASSERT(MUTEX_HELD(&spa_namespace_lock
));
1286 spa_config_enter(spa
, SCL_ALL
, spa
, RW_WRITER
);
1288 return (spa_last_synced_txg(spa
) + 1);
1292 * Used in combination with spa_vdev_config_enter() to allow the syncing
1293 * of multiple transactions without releasing the spa_namespace_lock.
1296 spa_vdev_config_exit(spa_t
*spa
, vdev_t
*vd
, uint64_t txg
, int error
,
1299 ASSERT(MUTEX_HELD(&spa_namespace_lock
));
1301 int config_changed
= B_FALSE
;
1303 ASSERT(txg
> spa_last_synced_txg(spa
));
1305 spa
->spa_pending_vdev
= NULL
;
1308 * Reassess the DTLs.
1310 vdev_dtl_reassess(spa
->spa_root_vdev
, 0, 0, B_FALSE
, B_FALSE
);
1312 if (error
== 0 && !list_is_empty(&spa
->spa_config_dirty_list
)) {
1313 config_changed
= B_TRUE
;
1314 spa
->spa_config_generation
++;
1318 * Verify the metaslab classes.
1320 ASSERT(metaslab_class_validate(spa_normal_class(spa
)) == 0);
1321 ASSERT(metaslab_class_validate(spa_log_class(spa
)) == 0);
1322 ASSERT(metaslab_class_validate(spa_embedded_log_class(spa
)) == 0);
1323 ASSERT(metaslab_class_validate(spa_special_class(spa
)) == 0);
1324 ASSERT(metaslab_class_validate(spa_dedup_class(spa
)) == 0);
1326 spa_config_exit(spa
, SCL_ALL
, spa
);
1329 * Panic the system if the specified tag requires it. This
1330 * is useful for ensuring that configurations are updated
1333 if (zio_injection_enabled
)
1334 zio_handle_panic_injection(spa
, tag
, 0);
1337 * Note: this txg_wait_synced() is important because it ensures
1338 * that there won't be more than one config change per txg.
1339 * This allows us to use the txg as the generation number.
1342 txg_wait_synced(spa
->spa_dsl_pool
, txg
);
1345 ASSERT(!vd
->vdev_detached
|| vd
->vdev_dtl_sm
== NULL
);
1346 if (vd
->vdev_ops
->vdev_op_leaf
) {
1347 mutex_enter(&vd
->vdev_initialize_lock
);
1348 vdev_initialize_stop(vd
, VDEV_INITIALIZE_CANCELED
,
1350 mutex_exit(&vd
->vdev_initialize_lock
);
1352 mutex_enter(&vd
->vdev_trim_lock
);
1353 vdev_trim_stop(vd
, VDEV_TRIM_CANCELED
, NULL
);
1354 mutex_exit(&vd
->vdev_trim_lock
);
1358 * The vdev may be both a leaf and top-level device.
1360 vdev_autotrim_stop_wait(vd
);
1362 spa_config_enter(spa
, SCL_STATE_ALL
, spa
, RW_WRITER
);
1364 spa_config_exit(spa
, SCL_STATE_ALL
, spa
);
1368 * If the config changed, update the config cache.
1371 spa_write_cachefile(spa
, B_FALSE
, B_TRUE
, B_TRUE
);
1375 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
1376 * locking of spa_vdev_enter(), we also want make sure the transactions have
1377 * synced to disk, and then update the global configuration cache with the new
1381 spa_vdev_exit(spa_t
*spa
, vdev_t
*vd
, uint64_t txg
, int error
)
1383 vdev_autotrim_restart(spa
);
1384 vdev_rebuild_restart(spa
);
1386 spa_vdev_config_exit(spa
, vd
, txg
, error
, FTAG
);
1387 mutex_exit(&spa_namespace_lock
);
1388 mutex_exit(&spa
->spa_vdev_top_lock
);
1394 * Lock the given spa_t for the purpose of changing vdev state.
1397 spa_vdev_state_enter(spa_t
*spa
, int oplocks
)
1399 int locks
= SCL_STATE_ALL
| oplocks
;
1402 * Root pools may need to read of the underlying devfs filesystem
1403 * when opening up a vdev. Unfortunately if we're holding the
1404 * SCL_ZIO lock it will result in a deadlock when we try to issue
1405 * the read from the root filesystem. Instead we "prefetch"
1406 * the associated vnodes that we need prior to opening the
1407 * underlying devices and cache them so that we can prevent
1408 * any I/O when we are doing the actual open.
1410 if (spa_is_root(spa
)) {
1411 int low
= locks
& ~(SCL_ZIO
- 1);
1412 int high
= locks
& ~low
;
1414 spa_config_enter(spa
, high
, spa
, RW_WRITER
);
1415 vdev_hold(spa
->spa_root_vdev
);
1416 spa_config_enter(spa
, low
, spa
, RW_WRITER
);
1418 spa_config_enter(spa
, locks
, spa
, RW_WRITER
);
1420 spa
->spa_vdev_locks
= locks
;
1424 spa_vdev_state_exit(spa_t
*spa
, vdev_t
*vd
, int error
)
1426 boolean_t config_changed
= B_FALSE
;
1429 if (vd
== NULL
|| vd
== spa
->spa_root_vdev
) {
1430 vdev_top
= spa
->spa_root_vdev
;
1432 vdev_top
= vd
->vdev_top
;
1435 if (vd
!= NULL
|| error
== 0)
1436 vdev_dtl_reassess(vdev_top
, 0, 0, B_FALSE
, B_FALSE
);
1439 if (vd
!= spa
->spa_root_vdev
)
1440 vdev_state_dirty(vdev_top
);
1442 config_changed
= B_TRUE
;
1443 spa
->spa_config_generation
++;
1446 if (spa_is_root(spa
))
1447 vdev_rele(spa
->spa_root_vdev
);
1449 ASSERT3U(spa
->spa_vdev_locks
, >=, SCL_STATE_ALL
);
1450 spa_config_exit(spa
, spa
->spa_vdev_locks
, spa
);
1453 * If anything changed, wait for it to sync. This ensures that,
1454 * from the system administrator's perspective, zpool(8) commands
1455 * are synchronous. This is important for things like zpool offline:
1456 * when the command completes, you expect no further I/O from ZFS.
1459 txg_wait_synced(spa
->spa_dsl_pool
, 0);
1462 * If the config changed, update the config cache.
1464 if (config_changed
) {
1465 mutex_enter(&spa_namespace_lock
);
1466 spa_write_cachefile(spa
, B_FALSE
, B_TRUE
, B_FALSE
);
1467 mutex_exit(&spa_namespace_lock
);
1474 * ==========================================================================
1475 * Miscellaneous functions
1476 * ==========================================================================
1480 spa_activate_mos_feature(spa_t
*spa
, const char *feature
, dmu_tx_t
*tx
)
1482 if (!nvlist_exists(spa
->spa_label_features
, feature
)) {
1483 fnvlist_add_boolean(spa
->spa_label_features
, feature
);
1485 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1486 * dirty the vdev config because lock SCL_CONFIG is not held.
1487 * Thankfully, in this case we don't need to dirty the config
1488 * because it will be written out anyway when we finish
1489 * creating the pool.
1491 if (tx
->tx_txg
!= TXG_INITIAL
)
1492 vdev_config_dirty(spa
->spa_root_vdev
);
1497 spa_deactivate_mos_feature(spa_t
*spa
, const char *feature
)
1499 if (nvlist_remove_all(spa
->spa_label_features
, feature
) == 0)
1500 vdev_config_dirty(spa
->spa_root_vdev
);
1504 * Return the spa_t associated with given pool_guid, if it exists. If
1505 * device_guid is non-zero, determine whether the pool exists *and* contains
1506 * a device with the specified device_guid.
1509 spa_by_guid(uint64_t pool_guid
, uint64_t device_guid
)
1512 avl_tree_t
*t
= &spa_namespace_avl
;
1514 ASSERT(MUTEX_HELD(&spa_namespace_lock
));
1516 for (spa
= avl_first(t
); spa
!= NULL
; spa
= AVL_NEXT(t
, spa
)) {
1517 if (spa
->spa_state
== POOL_STATE_UNINITIALIZED
)
1519 if (spa
->spa_root_vdev
== NULL
)
1521 if (spa_guid(spa
) == pool_guid
) {
1522 if (device_guid
== 0)
1525 if (vdev_lookup_by_guid(spa
->spa_root_vdev
,
1526 device_guid
) != NULL
)
1530 * Check any devices we may be in the process of adding.
1532 if (spa
->spa_pending_vdev
) {
1533 if (vdev_lookup_by_guid(spa
->spa_pending_vdev
,
1534 device_guid
) != NULL
)
1544 * Determine whether a pool with the given pool_guid exists.
1547 spa_guid_exists(uint64_t pool_guid
, uint64_t device_guid
)
1549 return (spa_by_guid(pool_guid
, device_guid
) != NULL
);
1553 spa_strdup(const char *s
)
1559 new = kmem_alloc(len
+ 1, KM_SLEEP
);
1560 memcpy(new, s
, len
+ 1);
1566 spa_strfree(char *s
)
1568 kmem_free(s
, strlen(s
) + 1);
1572 spa_generate_guid(spa_t
*spa
)
1578 (void) random_get_pseudo_bytes((void *)&guid
,
1580 } while (guid
== 0 || spa_guid_exists(spa_guid(spa
), guid
));
1583 (void) random_get_pseudo_bytes((void *)&guid
,
1585 } while (guid
== 0 || spa_guid_exists(guid
, 0));
1592 spa_load_guid_exists(uint64_t guid
)
1594 avl_tree_t
*t
= &spa_namespace_avl
;
1596 ASSERT(MUTEX_HELD(&spa_namespace_lock
));
1598 for (spa_t
*spa
= avl_first(t
); spa
!= NULL
; spa
= AVL_NEXT(t
, spa
)) {
1599 if (spa_load_guid(spa
) == guid
)
1603 return (arc_async_flush_guid_inuse(guid
));
1607 spa_generate_load_guid(void)
1612 (void) random_get_pseudo_bytes((void *)&guid
,
1614 } while (guid
== 0 || spa_load_guid_exists(guid
));
1620 snprintf_blkptr(char *buf
, size_t buflen
, const blkptr_t
*bp
)
1623 const char *checksum
= NULL
;
1624 const char *compress
= NULL
;
1627 if (BP_GET_TYPE(bp
) & DMU_OT_NEWTYPE
) {
1628 dmu_object_byteswap_t bswap
=
1629 DMU_OT_BYTESWAP(BP_GET_TYPE(bp
));
1630 (void) snprintf(type
, sizeof (type
), "bswap %s %s",
1631 DMU_OT_IS_METADATA(BP_GET_TYPE(bp
)) ?
1632 "metadata" : "data",
1633 dmu_ot_byteswap
[bswap
].ob_name
);
1635 (void) strlcpy(type
, dmu_ot
[BP_GET_TYPE(bp
)].ot_name
,
1638 if (!BP_IS_EMBEDDED(bp
)) {
1640 zio_checksum_table
[BP_GET_CHECKSUM(bp
)].ci_name
;
1642 compress
= zio_compress_table
[BP_GET_COMPRESS(bp
)].ci_name
;
1645 SNPRINTF_BLKPTR(kmem_scnprintf
, ' ', buf
, buflen
, bp
, type
, checksum
,
1650 spa_freeze(spa_t
*spa
)
1652 uint64_t freeze_txg
= 0;
1654 spa_config_enter(spa
, SCL_ALL
, FTAG
, RW_WRITER
);
1655 if (spa
->spa_freeze_txg
== UINT64_MAX
) {
1656 freeze_txg
= spa_last_synced_txg(spa
) + TXG_SIZE
;
1657 spa
->spa_freeze_txg
= freeze_txg
;
1659 spa_config_exit(spa
, SCL_ALL
, FTAG
);
1660 if (freeze_txg
!= 0)
1661 txg_wait_synced(spa_get_dsl(spa
), freeze_txg
);
1665 zfs_panic_recover(const char *fmt
, ...)
1670 vcmn_err(zfs_recover
? CE_WARN
: CE_PANIC
, fmt
, adx
);
1675 * This is a stripped-down version of strtoull, suitable only for converting
1676 * lowercase hexadecimal numbers that don't overflow.
1679 zfs_strtonum(const char *str
, char **nptr
)
1685 while ((c
= *str
) != '\0') {
1686 if (c
>= '0' && c
<= '9')
1688 else if (c
>= 'a' && c
<= 'f')
1689 digit
= 10 + c
- 'a';
1700 *nptr
= (char *)str
;
1706 spa_activate_allocation_classes(spa_t
*spa
, dmu_tx_t
*tx
)
1709 * We bump the feature refcount for each special vdev added to the pool
1711 ASSERT(spa_feature_is_enabled(spa
, SPA_FEATURE_ALLOCATION_CLASSES
));
1712 spa_feature_incr(spa
, SPA_FEATURE_ALLOCATION_CLASSES
, tx
);
1716 * ==========================================================================
1717 * Accessor functions
1718 * ==========================================================================
1722 spa_shutting_down(spa_t
*spa
)
1724 return (spa
->spa_async_suspended
);
1728 spa_get_dsl(spa_t
*spa
)
1730 return (spa
->spa_dsl_pool
);
1734 spa_is_initializing(spa_t
*spa
)
1736 return (spa
->spa_is_initializing
);
1740 spa_indirect_vdevs_loaded(spa_t
*spa
)
1742 return (spa
->spa_indirect_vdevs_loaded
);
1746 spa_get_rootblkptr(spa_t
*spa
)
1748 return (&spa
->spa_ubsync
.ub_rootbp
);
1752 spa_set_rootblkptr(spa_t
*spa
, const blkptr_t
*bp
)
1754 spa
->spa_uberblock
.ub_rootbp
= *bp
;
1758 spa_altroot(spa_t
*spa
, char *buf
, size_t buflen
)
1760 if (spa
->spa_root
== NULL
)
1763 (void) strlcpy(buf
, spa
->spa_root
, buflen
);
1767 spa_sync_pass(spa_t
*spa
)
1769 return (spa
->spa_sync_pass
);
1773 spa_name(spa_t
*spa
)
1775 return (spa
->spa_name
);
1779 spa_guid(spa_t
*spa
)
1781 dsl_pool_t
*dp
= spa_get_dsl(spa
);
1785 * If we fail to parse the config during spa_load(), we can go through
1786 * the error path (which posts an ereport) and end up here with no root
1787 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
1790 if (spa
->spa_root_vdev
== NULL
)
1791 return (spa
->spa_config_guid
);
1793 guid
= spa
->spa_last_synced_guid
!= 0 ?
1794 spa
->spa_last_synced_guid
: spa
->spa_root_vdev
->vdev_guid
;
1797 * Return the most recently synced out guid unless we're
1798 * in syncing context.
1800 if (dp
&& dsl_pool_sync_context(dp
))
1801 return (spa
->spa_root_vdev
->vdev_guid
);
1807 spa_load_guid(spa_t
*spa
)
1810 * This is a GUID that exists solely as a reference for the
1811 * purposes of the arc. It is generated at load time, and
1812 * is never written to persistent storage.
1814 return (spa
->spa_load_guid
);
1818 spa_last_synced_txg(spa_t
*spa
)
1820 return (spa
->spa_ubsync
.ub_txg
);
1824 spa_first_txg(spa_t
*spa
)
1826 return (spa
->spa_first_txg
);
1830 spa_syncing_txg(spa_t
*spa
)
1832 return (spa
->spa_syncing_txg
);
1836 * Return the last txg where data can be dirtied. The final txgs
1837 * will be used to just clear out any deferred frees that remain.
1840 spa_final_dirty_txg(spa_t
*spa
)
1842 return (spa
->spa_final_txg
- TXG_DEFER_SIZE
);
1846 spa_state(spa_t
*spa
)
1848 return (spa
->spa_state
);
1852 spa_load_state(spa_t
*spa
)
1854 return (spa
->spa_load_state
);
1858 spa_freeze_txg(spa_t
*spa
)
1860 return (spa
->spa_freeze_txg
);
1864 * Return the inflated asize for a logical write in bytes. This is used by the
1865 * DMU to calculate the space a logical write will require on disk.
1866 * If lsize is smaller than the largest physical block size allocatable on this
1867 * pool we use its value instead, since the write will end up using the whole
1871 spa_get_worst_case_asize(spa_t
*spa
, uint64_t lsize
)
1874 return (0); /* No inflation needed */
1875 return (MAX(lsize
, 1 << spa
->spa_max_ashift
) * spa_asize_inflation
);
1879 * Return the amount of slop space in bytes. It is typically 1/32 of the pool
1880 * (3.2%), minus the embedded log space. On very small pools, it may be
1881 * slightly larger than this. On very large pools, it will be capped to
1882 * the value of spa_max_slop. The embedded log space is not included in
1883 * spa_dspace. By subtracting it, the usable space (per "zfs list") is a
1884 * constant 97% of the total space, regardless of metaslab size (assuming the
1885 * default spa_slop_shift=5 and a non-tiny pool).
1887 * See the comment above spa_slop_shift for more details.
1890 spa_get_slop_space(spa_t
*spa
)
1896 * Make sure spa_dedup_dspace has been set.
1898 if (spa
->spa_dedup_dspace
== ~0ULL)
1899 spa_update_dspace(spa
);
1901 space
= spa
->spa_rdspace
;
1902 slop
= MIN(space
>> spa_slop_shift
, spa_max_slop
);
1905 * Subtract the embedded log space, but no more than half the (3.2%)
1906 * unusable space. Note, the "no more than half" is only relevant if
1907 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
1910 uint64_t embedded_log
=
1911 metaslab_class_get_dspace(spa_embedded_log_class(spa
));
1912 slop
-= MIN(embedded_log
, slop
>> 1);
1915 * Slop space should be at least spa_min_slop, but no more than half
1918 slop
= MAX(slop
, MIN(space
>> 1, spa_min_slop
));
1923 spa_get_dspace(spa_t
*spa
)
1925 return (spa
->spa_dspace
);
1929 spa_get_checkpoint_space(spa_t
*spa
)
1931 return (spa
->spa_checkpoint_info
.sci_dspace
);
1935 spa_update_dspace(spa_t
*spa
)
1937 spa
->spa_rdspace
= metaslab_class_get_dspace(spa_normal_class(spa
));
1938 if (spa
->spa_nonallocating_dspace
> 0) {
1940 * Subtract the space provided by all non-allocating vdevs that
1941 * contribute to dspace. If a file is overwritten, its old
1942 * blocks are freed and new blocks are allocated. If there are
1943 * no snapshots of the file, the available space should remain
1944 * the same. The old blocks could be freed from the
1945 * non-allocating vdev, but the new blocks must be allocated on
1946 * other (allocating) vdevs. By reserving the entire size of
1947 * the non-allocating vdevs (including allocated space), we
1948 * ensure that there will be enough space on the allocating
1949 * vdevs for this file overwrite to succeed.
1951 * Note that the DMU/DSL doesn't actually know or care
1952 * how much space is allocated (it does its own tracking
1953 * of how much space has been logically used). So it
1954 * doesn't matter that the data we are moving may be
1955 * allocated twice (on the old device and the new device).
1957 ASSERT3U(spa
->spa_rdspace
, >=, spa
->spa_nonallocating_dspace
);
1958 spa
->spa_rdspace
-= spa
->spa_nonallocating_dspace
;
1960 spa
->spa_dspace
= spa
->spa_rdspace
+ ddt_get_dedup_dspace(spa
) +
1961 brt_get_dspace(spa
);
1965 * Return the failure mode that has been set to this pool. The default
1966 * behavior will be to block all I/Os when a complete failure occurs.
1969 spa_get_failmode(spa_t
*spa
)
1971 return (spa
->spa_failmode
);
1975 spa_suspended(spa_t
*spa
)
1977 return (spa
->spa_suspended
!= ZIO_SUSPEND_NONE
);
1981 spa_version(spa_t
*spa
)
1983 return (spa
->spa_ubsync
.ub_version
);
1987 spa_deflate(spa_t
*spa
)
1989 return (spa
->spa_deflate
);
1993 spa_normal_class(spa_t
*spa
)
1995 return (spa
->spa_normal_class
);
1999 spa_log_class(spa_t
*spa
)
2001 return (spa
->spa_log_class
);
2005 spa_embedded_log_class(spa_t
*spa
)
2007 return (spa
->spa_embedded_log_class
);
2011 spa_special_class(spa_t
*spa
)
2013 return (spa
->spa_special_class
);
2017 spa_dedup_class(spa_t
*spa
)
2019 return (spa
->spa_dedup_class
);
2023 spa_special_has_ddt(spa_t
*spa
)
2025 return (zfs_ddt_data_is_special
&&
2026 spa
->spa_special_class
->mc_groups
!= 0);

/*
 * Locate an appropriate allocation class
 */
metaslab_class_t *
spa_preferred_class(spa_t *spa, const zio_t *zio)
{
	const zio_prop_t *zp = &zio->io_prop;

	/*
	 * Override object type for the purposes of selecting a storage class.
	 * Primarily for DMU_OTN_ types where we can't explicitly control their
	 * storage class; instead, choose a static type that most closely
	 * matches what we want.
	 */
	dmu_object_type_t objtype =
	    zp->zp_storage_type == DMU_OT_NONE ?
	    zp->zp_type : zp->zp_storage_type;

	/*
	 * ZIL allocations determine their class in zio_alloc_zil().
	 */
	ASSERT(objtype != DMU_OT_INTENT_LOG);

	boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;

	if (DMU_OT_IS_DDT(objtype)) {
		if (spa->spa_dedup_class->mc_groups != 0)
			return (spa_dedup_class(spa));
		else if (has_special_class && zfs_ddt_data_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/* Indirect blocks for user data can land in special if allowed */
	if (zp->zp_level > 0 &&
	    (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
		if (has_special_class && zfs_user_indirect_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	if (DMU_OT_IS_METADATA(objtype) || zp->zp_level > 0) {
		if (has_special_class)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/*
	 * Allow small file blocks in special class in some cases (like
	 * for the dRAID vdev feature). But always leave a reserve of
	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
	 */
	if (DMU_OT_IS_FILE(objtype) &&
	    has_special_class && zio->io_size <= zp->zp_zpl_smallblk) {
		metaslab_class_t *special = spa_special_class(spa);
		uint64_t alloc = metaslab_class_get_alloc(special);
		uint64_t space = metaslab_class_get_space(special);
		uint64_t limit =
		    (space * (100 - zfs_special_class_metadata_reserve_pct))
		    / 100;

		if (alloc < limit)
			return (special);
	}

	return (spa_normal_class(spa));
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}
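
/*
 * Typical usage, as a sketch (the callers live elsewhere in the DMU): an
 * objset whose eviction is in flight is registered on the list, deregistered
 * when its eviction completes, and spa_evicting_os_wait() is used on the pool
 * unload path to drain the list before the spa_t is torn down:
 *
 *	spa_evicting_os_register(spa, os);
 *	... asynchronous objset eviction runs ...
 *	spa_evicting_os_deregister(spa, os);	(from the completion path)
 *	spa_evicting_os_wait(spa);		(during pool unload)
 */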

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated. Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
	return (spa->spa_autotrim);
}

uint64_t
spa_deadman_ziotime(spa_t *spa)
{
	return (spa->spa_deadman_ziotime);
}

uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
	return (spa->spa_deadman_failmode);
}

void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
	if (strcmp(failmode, "wait") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
	else if (strcmp(failmode, "continue") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
	else if (strcmp(failmode, "panic") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
	else
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
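
/*
 * For illustration: this setter is driven by the zfs_deadman_failmode module
 * parameter declared at the bottom of this file, so on Linux builds the mode
 * can typically be changed at runtime with something like:
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *
 * Any unrecognized string falls back to "wait", as coded above.
 */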

void
spa_set_deadman_ziotime(hrtime_t ns)
{
	spa_t *spa = NULL;

	if (spa_mode_global != SPA_MODE_UNINIT) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL)
			spa->spa_deadman_ziotime = ns;
		mutex_exit(&spa_namespace_lock);
	}
}

void
spa_set_deadman_synctime(hrtime_t ns)
{
	spa_t *spa = NULL;

	if (spa_mode_global != SPA_MODE_UNINIT) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL)
			spa->spa_deadman_synctime = ns;
		mutex_exit(&spa_namespace_lock);
	}
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) *
		    vd->vdev_deflate_ratio;
	}

	return (dsize);
}
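
/*
 * Clarifying note: "dsize" is the deflated size, i.e. the allocated size
 * expressed in SPA_MINBLOCKSIZE units and scaled by the top-level vdev's
 * deflate ratio, which normalizes away raidz/mirror allocation overhead so
 * that space accounting stays consistent across differently shaped vdevs.
 */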

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
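
/*
 * The two variants above differ only in locking: bp_get_dsize_sync() expects
 * the caller to already hold a config lock (dva_get_dsize_sync() asserts it),
 * while bp_get_dsize() takes SCL_VDEV as reader around the same per-DVA sum,
 * e.g. from open context:
 *
 *	uint64_t dsize = bp_get_dsize(spa, bp);
 */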

uint64_t
spa_dirty_data(spa_t *spa)
{
	return (spa->spa_dsl_pool->dp_dirty_total);
}

/*
 * ==========================================================================
 * SPA Import Progress Routines
 * ==========================================================================
 */

typedef struct spa_import_progress {
	uint64_t		pool_guid;	/* unique id for updates */
	char			*pool_name;
	spa_load_state_t	spa_load_state;
	char			*spa_load_notes;
	uint64_t		mmp_sec_remaining;	/* MMP activity check */
	uint64_t		spa_load_max_txg;	/* rewind txg */
	procfs_list_node_t	smh_node;
} spa_import_progress_t;

spa_history_list_t *spa_import_progress_list = NULL;

static int
spa_import_progress_show_header(struct seq_file *f)
{
	seq_printf(f, "%-20s %-14s %-14s %-12s %-16s %s\n", "pool_guid",
	    "load_state", "multihost_secs", "max_txg",
	    "pool_name", "notes");
	return (0);
}

static int
spa_import_progress_show(struct seq_file *f, void *data)
{
	spa_import_progress_t *sip = (spa_import_progress_t *)data;

	seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %-16s %s\n",
	    (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
	    (u_longlong_t)sip->mmp_sec_remaining,
	    (u_longlong_t)sip->spa_load_max_txg,
	    (sip->pool_name ? sip->pool_name : "-"),
	    (sip->spa_load_notes ? sip->spa_load_notes : "-"));

	return (0);
}

/* Remove oldest elements from list until there are no more than 'size' left */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
	spa_import_progress_t *sip;
	while (shl->size > size) {
		sip = list_remove_head(&shl->procfs_list.pl_list);

		spa_strfree(sip->pool_name);
		if (sip->spa_load_notes)
			kmem_strfree(sip->spa_load_notes);
		kmem_free(sip, sizeof (spa_import_progress_t));
		shl->size--;
	}

	IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}

void
spa_import_progress_init(void)
{
	spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
	    KM_SLEEP);

	spa_import_progress_list->size = 0;

	spa_import_progress_list->procfs_list.pl_private =
	    spa_import_progress_list;

	procfs_list_install("zfs",
	    NULL,
	    "import_progress",
	    0644,
	    &spa_import_progress_list->procfs_list,
	    spa_import_progress_show,
	    spa_import_progress_show_header,
	    NULL,
	    offsetof(spa_import_progress_t, smh_node));
}

void
spa_import_progress_destroy(void)
{
	spa_history_list_t *shl = spa_import_progress_list;
	procfs_list_uninstall(&shl->procfs_list);
	spa_import_progress_truncate(shl, 0);
	procfs_list_destroy(&shl->procfs_list);
	kmem_free(shl, sizeof (spa_history_list_t));
}

int
spa_import_progress_set_state(uint64_t pool_guid,
    spa_load_state_t load_state)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->spa_load_state = load_state;
			if (sip->spa_load_notes != NULL) {
				kmem_strfree(sip->spa_load_notes);
				sip->spa_load_notes = NULL;
			}
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

static void
spa_import_progress_set_notes_impl(spa_t *spa, boolean_t log_dbgmsg,
    const char *fmt, va_list adx)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	uint64_t pool_guid = spa_guid(spa);

	if (shl->size == 0)
		return;

	char *notes = kmem_vasprintf(fmt, adx);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			if (sip->spa_load_notes != NULL) {
				kmem_strfree(sip->spa_load_notes);
				sip->spa_load_notes = NULL;
			}
			sip->spa_load_notes = notes;
			if (log_dbgmsg)
				zfs_dbgmsg("'%s' %s", sip->pool_name, notes);
			notes = NULL;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);
	if (notes != NULL)
		kmem_strfree(notes);
}

void
spa_import_progress_set_notes(spa_t *spa, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	spa_import_progress_set_notes_impl(spa, B_TRUE, fmt, adx);
	va_end(adx);
}

void
spa_import_progress_set_notes_nolog(spa_t *spa, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	spa_import_progress_set_notes_impl(spa, B_FALSE, fmt, adx);
	va_end(adx);
}

int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->spa_load_max_txg = load_max_txg;
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
    uint64_t mmp_sec_remaining)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->mmp_sec_remaining = mmp_sec_remaining;
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

/*
 * A new import is in progress, add an entry.
 */
void
spa_import_progress_add(spa_t *spa)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	const char *poolname = NULL;

	sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
	sip->pool_guid = spa_guid(spa);

	(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
	    &poolname);
	if (poolname == NULL)
		poolname = spa_name(spa);
	sip->pool_name = spa_strdup(poolname);
	sip->spa_load_state = spa_load_state(spa);
	sip->spa_load_notes = NULL;

	mutex_enter(&shl->procfs_list.pl_lock);
	procfs_list_add(&shl->procfs_list, sip);
	shl->size++;
	mutex_exit(&shl->procfs_list.pl_lock);
}
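
/*
 * For illustration: the procfs_list installed by spa_import_progress_init()
 * exposes these entries to user space (typically as
 * /proc/spl/kstat/zfs/import_progress on Linux), one line per in-flight
 * import, formatted by spa_import_progress_show() above; e.g. a hypothetical
 * entry:
 *
 *	pool_guid  load_state  multihost_secs  max_txg  pool_name  notes
 *	123...789  3           0               0        tank       -
 */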

void
spa_import_progress_remove(uint64_t pool_guid)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			spa_strfree(sip->pool_name);
			if (sip->spa_load_notes)
				spa_strfree(sip->spa_load_notes);
			list_remove(&shl->procfs_list.pl_list, sip);
			shl->size--;
			kmem_free(sip, sizeof (spa_import_progress_t));
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);

	return (TREE_ISIGN(s));
}

void
spa_init(spa_mode_t mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifndef _KERNEL
	if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
		struct sigaction sa;

		sa.sa_flags = SA_SIGINFO;
		sigemptyset(&sa.sa_mask);
		sa.sa_sigaction = arc_buf_sigsegv;

		if (sigaction(SIGSEGV, &sa, NULL) == -1) {
			perror("could not enable watchpoints: "
			    "sigaction(SIGSEGV, ...) = ");
		}
	}
#endif

	zfs_refcount_init();
	metaslab_stat_init();
	vdev_mirror_stat_init();
	vdev_raidz_math_init();
	zpool_feature_init();
	spa_import_progress_init();
}

void
spa_fini(void)
{
	vdev_mirror_stat_fini();
	vdev_raidz_math_fini();
	metaslab_stat_fini();
	zfs_refcount_fini();
	spa_import_progress_destroy();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has a dedicated slog device. No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_groups != 0);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

spa_mode_t
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_get_last_scrubbed_txg(spa_t *spa)
{
	return (spa->spa_scrubbed_last_txg);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;

	if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_errorscrub_pause = 0;

	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	spa->spa_scan_pass_issued = 0;

	/* error scrub stats */
	spa->spa_scan_pass_errorscrub_spent_paused = 0;
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
	    scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
		return (SET_ERROR(ENOENT));

	memset(ps, 0, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_state = scn->scn_phys.scn_state;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_skipped = scn->scn_phys.scn_skipped;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;

	/* data not stored on disk */
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
	ps->pss_pass_issued = spa->spa_scan_pass_issued;
	ps->pss_issued =
	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;

	/* error scrub data stored on disk */
	ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
	ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
	ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
	ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
	ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
	ps->pss_error_scrub_to_be_examined =
	    scn->errorscrub_phys.dep_to_examine;

	/* error scrub data not stored on disk */
	ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;

	return (0);
}

uint64_t
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}

/*
 * Returns the txg that the last device removal completed. No indirect mappings
 * have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}

uint64_t
spa_maxdnodesize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (DNODE_MAX_SIZE);
	else
		return (DNODE_MIN_SIZE);
}

boolean_t
spa_multihost(spa_t *spa)
{
	return (spa->spa_multihost ? B_TRUE : B_FALSE);
}

uint32_t
spa_get_hostid(spa_t *spa)
{
	return (spa->spa_hostid);
}

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
	return (spa->spa_syncing_log_sm);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}

/*
 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
 */
const char *
spa_state_to_name(spa_t *spa)
{
	ASSERT3P(spa, !=, NULL);

	/*
	 * It is possible for the spa to exist without a root vdev while the
	 * spa transitions during import/export.
	 */
	vdev_t *rvd = spa->spa_root_vdev;
	if (rvd == NULL) {
		return ("TRANSITIONING");
	}
	vdev_state_t state = rvd->vdev_state;
	vdev_aux_t aux = rvd->vdev_stat.vs_aux;

	if (spa_suspended(spa))
		return ("SUSPENDED");

	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return ("OFFLINE");
	case VDEV_STATE_REMOVED:
		return ("REMOVED");
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return ("FAULTED");
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return ("SPLIT");
		else
			return ("UNAVAIL");
	case VDEV_STATE_FAULTED:
		return ("FAULTED");
	case VDEV_STATE_DEGRADED:
		return ("DEGRADED");
	case VDEV_STATE_HEALTHY:
		return ("ONLINE");
	default:
		break;
	}

	return ("UNKNOWN");
}

boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

boolean_t
spa_has_checkpoint(spa_t *spa)
{
	return (spa->spa_checkpoint_txg != 0);
}

boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
	    spa->spa_mode == SPA_MODE_READ);
}

uint64_t
spa_min_claim_txg(spa_t *spa)
{
	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

	if (checkpoint_txg != 0)
		return (checkpoint_txg + 1);

	return (spa->spa_first_txg);
}

/*
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it. In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.
 */
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);

	uint64_t unreserved = dsl_pool_unreserved_space(dp,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;

	if (spa_has_checkpoint(spa) && avail == 0)
		return (B_TRUE);

	return (B_FALSE);
}
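
/*
 * Worked example with hypothetical numbers: if dsl_pool_unreserved_space()
 * reports 96 GiB under the extra-reserved slop check and the root dsl_dir
 * already has 96 GiB (or more) charged to it, then avail == 0; with a
 * checkpoint present the function returns B_TRUE and async destroys are
 * held off until space is freed up.
 */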

#if defined(_KERNEL)

int
param_set_deadman_failmode_common(const char *val)
{
	spa_t *spa = NULL;
	char *p;

	if (val == NULL)
		return (SET_ERROR(EINVAL));

	if ((p = strchr(val, '\n')) != NULL)
		*p = '\0';

	if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
	    strcmp(val, "panic") != 0)
		return (SET_ERROR(EINVAL));

	if (spa_mode_global != SPA_MODE_UNINIT) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL)
			spa_set_deadman_failmode(spa, val);
		mutex_exit(&spa_namespace_lock);
	}

	return (0);
}
#endif

/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);

/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);

/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);

/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);

/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);

/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_special_class);
EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);

/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);

ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
	"Set additional debugging flags");

ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
	"Set to attempt to recover from fatal errors");

ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
	"Set to ignore IO errors during free and permanently leak the space");

ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
	"Dead I/O check interval in milliseconds");

ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
	"Enable deadman timer");

ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
	"SPA size estimate multiplication factor");

ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
	"Place DDT data into the special class");

ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
	"Place user data indirect blocks into the special class");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
	"Failmode for deadman timer");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
	param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
	"Pool sync expiration time in milliseconds");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
	param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
	"IO expiration time in milliseconds");

ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
	"Small file blocks in special vdevs depend on this much "
	"free space being available");

ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
	param_get_uint, ZMOD_RW, "Reserved free space in pool");

ZFS_MODULE_PARAM(zfs, spa_, num_allocators, INT, ZMOD_RW,
	"Number of allocators per spa");

ZFS_MODULE_PARAM(zfs, spa_, cpus_per_allocator, INT, ZMOD_RW,
	"Minimum number of CPUs per allocator");