/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zthr.h>
#include <sys/bplist.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
/*
 * Deadlist concurrency:
 *
 * Deadlists can only be modified from the syncing thread.
 *
 * Except for dsl_deadlist_insert(), it can only be modified with the
 * dp_config_rwlock held with RW_WRITER.
 *
 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
 * be called concurrently, from open context, with the dp_config_rwlock held
 * with RW_READER.
 *
 * Therefore, we only need to provide locking between dsl_deadlist_insert() and
 * the accessors, protecting:
 *     dl_phys->dl_used,comp,uncomp
 *     and protecting the dl_tree from being loaded.
 * The locking is provided by dl_lock.  Note that the bpobj_t
 * provides its own locking, and dl_oldfmt is immutable.
 */
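
/*
 * For example (a minimal sketch, not a function in this file): an
 * open-context reader only interacts with dl_lock through the accessors,
 *
 *	uint64_t used, comp, uncomp;
 *	dsl_deadlist_space(dl, &used, &comp, &uncomp);
 *
 * and may run concurrently with dsl_deadlist_insert() in syncing context.
 */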
/*
 * Livelists:
 *
 * Livelists use the same 'deadlist_t' struct as deadlists and are also used
 * to track blkptrs over the lifetime of a dataset. Livelists, however, belong
 * to clones and track the blkptrs that are clone-specific (were born after
 * the clone's creation). The exception is embedded block pointers, which are
 * not included in livelists because they do not need to be freed.
 *
 * When it comes time to delete the clone, the livelist provides a quick
 * reference as to what needs to be freed. For this reason, livelists also
 * track when clone-specific blkptrs are freed before deletion to prevent
 * double frees. Each blkptr in a livelist is marked as a FREE or an ALLOC,
 * and the deletion algorithm iterates backwards over the livelist, matching
 * FREE/ALLOC pairs and then freeing those ALLOCs which remain. Livelists
 * are also updated in the case when blkptrs are remapped: the old version
 * of the blkptr is cancelled out with a FREE and the new version is tracked
 * instead.
 *
 * To bound the amount of memory required for deletion, livelists over a
 * certain size are spread over multiple entries. Entries are grouped by
 * birth txg so we can be sure the ALLOC/FREE pair for a given blkptr will
 * be in the same entry. This allows us to delete livelists incrementally
 * over multiple syncs, one entry at a time.
 *
 * During the lifetime of the clone, livelists can get extremely large.
 * Their size is managed by periodic condensing (preemptively cancelling out
 * FREE/ALLOC pairs). Livelists are disabled when a clone is promoted or when
 * the shared space between the clone and its origin is so small that it
 * doesn't make sense to use livelists anymore.
 */
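
/*
 * Illustrative (hypothetical) livelist contents, oldest to newest:
 *
 *	ALLOC blk_A   ALLOC blk_B   FREE blk_A   ALLOC blk_C
 *
 * Iterating backwards, FREE blk_A cancels out its earlier ALLOC, leaving
 * blk_B and blk_C as the blkptrs the deletion algorithm must free.
 */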
/*
 * The threshold sublist size at which we create a new sub-livelist for the
 * next txg. However, since blkptrs of the same transaction group must be in
 * the same sub-list, the actual sublist size may exceed this. When picking the
 * size we had to balance the fact that larger sublists mean fewer sublists
 * (decreasing the cost of insertion) against the consideration that sublists
 * will be loaded into memory and shouldn't take up an inordinate amount of
 * space. We settled on ~500000 entries, corresponding to roughly 128M.
 */
uint64_t zfs_livelist_max_entries = 500000;
/*
 * We can approximate how much of a performance gain a livelist will give us
 * based on the percentage of blocks shared between the clone and its origin.
 * 0 percent shared means that the clone has completely diverged and that the
 * old method is maximally effective: every read from the block tree will
 * result in lots of frees. Livelists give us gains when they track blocks
 * scattered across the tree, when one read in the old method might only
 * result in a few frees. Once the clone has been overwritten enough,
 * writes are no longer sparse and we'll no longer get much of a benefit from
 * tracking them with a livelist. We chose a lower limit of 75 percent shared
 * (25 percent overwritten). This means that 1/4 of all block pointers will be
 * freed (e.g. each read frees 256, out of a max of 1024) so we expect livelists
 * to make deletion 4x faster. Once the amount of shared space drops below this
 * threshold, the clone will revert to the old deletion method.
 */
int zfs_livelist_min_percent_shared = 75;
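
/*
 * Worked example of the threshold above, using the comment's numbers:
 * at exactly 75% shared, a read of an indirect block holding 1024 block
 * pointers yields 1024 * (1 - 0.75) = 256 frees, so deletion does the
 * same number of frees with roughly 1/4 of the reads, i.e. ~4x faster.
 */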
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_entry_t *dle1 = arg1;
	const dsl_deadlist_entry_t *dle2 = arg2;

	return (TREE_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
}
static int
dsl_deadlist_cache_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_cache_entry_t *dlce1 = arg1;
	const dsl_deadlist_cache_entry_t *dlce2 = arg2;

	return (TREE_CMP(dlce1->dlce_mintxg, dlce2->dlce_mintxg));
}
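
/*
 * Load the deadlist's ZAP entries (mintxg -> bpobj object number) into
 * dl->dl_tree and open each entry's bpobj.  Caller must hold dl_lock.
 */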
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache) {
		/*
		 * After loading the tree, the caller may modify the tree,
		 * e.g. to add or remove nodes, or to make a node no longer
		 * refer to the empty_bpobj.  These changes would make the
		 * dl_cache incorrect.  Therefore we discard the cache here,
		 * so that it can't become incorrect.
		 */
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL) {
			kmem_free(dlce, sizeof (*dlce));
		}
		avl_destroy(&dl->dl_cache);
		dl->dl_havecache = B_FALSE;
	}
	if (dl->dl_havetree)
		return;

	za = zap_attribute_alloc();
	avl_create(&dl->dl_tree, dsl_deadlist_compare,
	    sizeof (dsl_deadlist_entry_t),
	    offsetof(dsl_deadlist_entry_t, dle_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
		dle->dle_mintxg = zfs_strtonum(za->za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dle->dle_bpobj.bpo_object = za->za_first_integer;
		dmu_prefetch_dnode(dl->dl_os, dle->dle_bpobj.bpo_object,
		    ZIO_PRIORITY_SYNC_READ);

		avl_add(&dl->dl_tree, dle);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	for (dsl_deadlist_entry_t *dle = avl_first(&dl->dl_tree);
	    dle != NULL; dle = AVL_NEXT(&dl->dl_tree, dle)) {
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os,
		    dle->dle_bpobj.bpo_object));
	}
	dl->dl_havetree = B_TRUE;
}
/*
 * Load only the non-empty bpobj's into the dl_cache.  The cache is an analog
 * of the dl_tree, but contains only non-empty_bpobj nodes from the ZAP.  It
 * is used only for gathering space statistics.  The dl_cache has two
 * advantages over the dl_tree:
 *
 * 1. Loading the dl_cache is ~5x faster than loading the dl_tree (if it's
 * mostly empty_bpobj's), due to less CPU overhead to open the empty_bpobj
 * many times and to inquire about its (zero) space stats many times.
 *
 * 2. The dl_cache uses less memory than the dl_tree.  We only need to load
 * the dl_tree of snapshots when deleting a snapshot, after which we free the
 * dl_tree with dsl_deadlist_discard_tree().
 */
static void
dsl_deadlist_load_cache(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache)
		return;

	uint64_t empty_bpobj = dmu_objset_pool(dl->dl_os)->dp_empty_bpobj;

	avl_create(&dl->dl_cache, dsl_deadlist_cache_compare,
	    sizeof (dsl_deadlist_cache_entry_t),
	    offsetof(dsl_deadlist_cache_entry_t, dlce_node));
	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		if (za->za_first_integer == empty_bpobj)
			continue;
		dsl_deadlist_cache_entry_t *dlce =
		    kmem_zalloc(sizeof (*dlce), KM_SLEEP);
		dlce->dlce_mintxg = zfs_strtonum(za->za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dlce->dlce_bpobj = za->za_first_integer;
		dmu_prefetch_dnode(dl->dl_os, dlce->dlce_bpobj,
		    ZIO_PRIORITY_SYNC_READ);
		avl_add(&dl->dl_cache, dlce);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	for (dsl_deadlist_cache_entry_t *dlce = avl_first(&dl->dl_cache);
	    dlce != NULL; dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, dlce->dlce_bpobj));
		VERIFY0(bpobj_space(&bpo,
		    &dlce->dlce_bytes, &dlce->dlce_comp, &dlce->dlce_uncomp));
		bpobj_close(&bpo);
	}
	dl->dl_havecache = B_TRUE;
}
/*
 * Discard the tree to save memory.
 */
void
dsl_deadlist_discard_tree(dsl_deadlist_t *dl)
{
	mutex_enter(&dl->dl_lock);

	if (!dl->dl_havetree) {
		mutex_exit(&dl->dl_lock);
		return;
	}
	dsl_deadlist_entry_t *dle;
	void *cookie = NULL;
	while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) {
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
	}
	avl_destroy(&dl->dl_tree);

	dl->dl_havetree = B_FALSE;
	mutex_exit(&dl->dl_lock);
}
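
/*
 * Call func(args, dle) on each entry in the deadlist, in mintxg order,
 * stopping early if func returns nonzero.
 */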
void
dsl_deadlist_iterate(dsl_deadlist_t *dl, deadlist_iter_t func, void *args)
{
	dsl_deadlist_entry_t *dle;

	ASSERT(dsl_deadlist_is_open(dl));

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	mutex_exit(&dl->dl_lock);
	for (dle = avl_first(&dl->dl_tree); dle != NULL;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		if (func(args, dle) != 0)
			break;
	}
}
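
/*
 * Open the deadlist object.  On old pools (pre-SPA_VERSION_DEADLISTS)
 * the object is a plain bpobj, in which case dl_oldfmt is set and the
 * ZAP/AVL machinery in this file is bypassed.
 */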
int
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;
	int err;

	ASSERT(!dsl_deadlist_is_open(dl));

	mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
	dl->dl_os = os;
	dl->dl_object = object;
	err = dmu_bonus_hold(os, object, dl, &dl->dl_dbuf);
	if (err != 0)
		return (err);
	dmu_object_info_from_db(dl->dl_dbuf, &doi);
	if (doi.doi_type == DMU_OT_BPOBJ) {
		dmu_buf_rele(dl->dl_dbuf, dl);
		dl->dl_dbuf = NULL;
		dl->dl_oldfmt = B_TRUE;
		return (bpobj_open(&dl->dl_bpobj, os, object));
	}

	dl->dl_oldfmt = B_FALSE;
	dl->dl_phys = dl->dl_dbuf->db_data;
	dl->dl_havetree = B_FALSE;
	dl->dl_havecache = B_FALSE;
	return (0);
}
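
/*
 * Typical open/query/close sequence (a sketch; see
 * dsl_deadlist_regenerate() below for a real open/close caller):
 *
 *	dsl_deadlist_t dl = { 0 };
 *	uint64_t used, comp, uncomp;
 *	VERIFY0(dsl_deadlist_open(&dl, os, dlobj));
 *	dsl_deadlist_space(&dl, &used, &comp, &uncomp);
 *	dsl_deadlist_close(&dl);
 */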
boolean_t
dsl_deadlist_is_open(dsl_deadlist_t *dl)
{
	return (dl->dl_os != NULL);
}
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
	ASSERT(dsl_deadlist_is_open(dl));
	mutex_destroy(&dl->dl_lock);

	if (dl->dl_oldfmt) {
		dl->dl_oldfmt = B_FALSE;
		bpobj_close(&dl->dl_bpobj);
		dl->dl_os = NULL;
		dl->dl_object = 0;
		return;
	}

	if (dl->dl_havetree) {
		dsl_deadlist_entry_t *dle;
		void *cookie = NULL;
		while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
		    != NULL) {
			bpobj_close(&dle->dle_bpobj);
			kmem_free(dle, sizeof (*dle));
		}
		avl_destroy(&dl->dl_tree);
	}
	if (dl->dl_havecache) {
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL) {
			kmem_free(dlce, sizeof (*dlce));
		}
		avl_destroy(&dl->dl_cache);
	}
	dmu_buf_rele(dl->dl_dbuf, dl);
	dl->dl_dbuf = NULL;
	dl->dl_phys = NULL;
	dl->dl_os = NULL;
	dl->dl_object = 0;
}
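
/*
 * Create a new deadlist object.  On pools too old for deadlists this is
 * a plain bpobj; otherwise it is a ZAP keyed by mintxg, with the space
 * statistics kept in the bonus buffer (dsl_deadlist_phys_t).
 */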
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
	return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
	    sizeof (dsl_deadlist_phys_t), tx));
}
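
/*
 * Free the deadlist object 'dlobj' and every bpobj it references,
 * handling both the old (plain bpobj) and new (ZAP) formats.
 */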
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
	dmu_object_info_t doi;
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	VERIFY0(dmu_object_info(os, dlobj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_free(os, dlobj, tx);
		return;
	}

	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, os, dlobj);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t obj = za->za_first_integer;
		if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
			bpobj_decr_empty(os, tx);
		else
			bpobj_free(os, obj, tx);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	VERIFY0(dmu_object_free(os, dlobj, tx));
}
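
/*
 * Append a blkptr to the entry's bpobj.  If the entry still points at
 * the shared empty_bpobj, a private bpobj is allocated first and the
 * deadlist's ZAP is updated to reference it.
 */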
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object ==
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
	bpobj_enqueue(&dle->dle_bpobj, bp, bp_freed, tx);
}
static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
	} else {
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
}
/*
 * Prefetch metadata required for dle_enqueue_subobj().
 */
static void
dle_prefetch_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj)
{
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj)
		bpobj_prefetch_subobj(&dle->dle_bpobj, obj);
}
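
/*
 * Insert a blkptr into the deadlist entry whose mintxg range covers the
 * blkptr's birth txg, updating the deadlist's space accounting.  For a
 * livelist, bp_freed distinguishes FREE records from ALLOC records.
 */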
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		bpobj_enqueue(&dl->dl_bpobj, bp, bp_freed, tx);
		return;
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);

	int sign = bp_freed ? -1 : +1;
	dl->dl_phys->dl_used +=
	    sign * bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
	dl->dl_phys->dl_comp += sign * BP_GET_PSIZE(bp);
	dl->dl_phys->dl_uncomp += sign * BP_GET_UCSIZE(bp);

	dle_tofind.dle_mintxg = BP_GET_LOGICAL_BIRTH(bp);
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	else
		dle = AVL_PREV(&dl->dl_tree, dle);

	if (dle == NULL) {
		zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
		    bp, (longlong_t)BP_GET_LOGICAL_BIRTH(bp));
		dle = avl_first(&dl->dl_tree);
	}

	ASSERT3P(dle, !=, NULL);
	dle_enqueue(dl, dle, bp, bp_freed, tx);
	mutex_exit(&dl->dl_lock);
}
int
dsl_deadlist_insert_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_FALSE, tx);
	return (0);
}
int
dsl_deadlist_insert_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_TRUE, tx);
	return (0);
}
/*
 * Insert a new key in the deadlist, which must be > all current entries.
 * mintxg is not inclusive.
 */
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t obj;
	dsl_deadlist_entry_t *dle;

	if (dl->dl_oldfmt)
		return;

	dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
	dle->dle_mintxg = mintxg;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
	avl_add(&dl->dl_tree, dle);

	VERIFY0(zap_add_int_key(dl->dl_os, dl->dl_object,
	    mintxg, obj, tx));
	mutex_exit(&dl->dl_lock);
}
/*
 * Remove this key, merging its entries into the previous key.
 */
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *dle_prev;

	if (dl->dl_oldfmt)
		return;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	ASSERT3P(dle, !=, NULL);
	dle_prev = AVL_PREV(&dl->dl_tree, dle);
	ASSERT3P(dle_prev, !=, NULL);

	dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);

	avl_remove(&dl->dl_tree, dle);
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));

	VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
	mutex_exit(&dl->dl_lock);
}
/*
 * Remove a deadlist entry and all of its contents by removing the entry from
 * the deadlist's avl tree, freeing the entry's bpobj and adjusting the
 * deadlist's space accounting accordingly.
 */
void
dsl_deadlist_remove_entry(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t used, comp, uncomp;
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	objset_t *os = dl->dl_os;

	if (dl->dl_oldfmt)
		return;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	VERIFY3P(dle, !=, NULL);

	avl_remove(&dl->dl_tree, dle);
	VERIFY0(zap_remove_int(os, dl->dl_object, mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj) {
		bpobj_decr_empty(os, tx);
	} else {
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	}
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));
	mutex_exit(&dl->dl_lock);
}
/*
 * Clear out the contents of a deadlist_entry by freeing its bpobj,
 * replacing it with an empty bpobj and adjusting the deadlist's
 * space accounting accordingly.
 */
void
dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl,
    dmu_tx_t *tx)
{
	uint64_t new_obj, used, comp, uncomp;
	objset_t *os = dl->dl_os;

	mutex_enter(&dl->dl_lock);
	VERIFY0(zap_remove_int(os, dl->dl_object, dle->dle_mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj)
		bpobj_decr_empty(os, tx);
	else
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	bpobj_close(&dle->dle_bpobj);
	new_obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, os, new_obj));
	VERIFY0(zap_add_int_key(os, dl->dl_object, dle->dle_mintxg,
	    new_obj, tx));
	ASSERT(bpobj_is_empty(&dle->dle_bpobj));
	mutex_exit(&dl->dl_lock);
}
/*
 * Return the first entry in the deadlist's avl tree.
 */
dsl_deadlist_entry_t *
dsl_deadlist_first(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_first(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}
/*
 * Return the last entry in the deadlist's avl tree.
 */
dsl_deadlist_entry_t *
dsl_deadlist_last(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_last(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}
/*
 * Walk ds's snapshots to regenerate the ZAP & AVL.
 */
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_t dl = { 0 };
	dsl_pool_t *dp = dmu_objset_pool(os);

	VERIFY0(dsl_deadlist_open(&dl, os, dlobj));
	if (dl.dl_oldfmt) {
		dsl_deadlist_close(&dl);
		return;
	}

	while (mrs_obj != 0) {
		dsl_dataset_t *ds;
		VERIFY0(dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
		dsl_deadlist_add_key(&dl,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
		mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_deadlist_close(&dl);
}
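
/*
 * Create a new deadlist sharing dl's keys below maxtxg, each key mapped
 * to an empty bpobj.  Returns the new deadlist's object number.
 */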
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t *dle;
	uint64_t newobj;

	newobj = dsl_deadlist_alloc(dl->dl_os, tx);

	if (dl->dl_oldfmt) {
		dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
		return (newobj);
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t obj;

		if (dle->dle_mintxg >= maxtxg)
			break;

		obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY0(zap_add_int_key(dl->dl_os, newobj,
		    dle->dle_mintxg, obj, tx));
	}
	mutex_exit(&dl->dl_lock);
	return (newobj);
}
void
dsl_deadlist_space(dsl_deadlist_t *dl,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	ASSERT(dsl_deadlist_is_open(dl));
	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space(&dl->dl_bpobj,
		    usedp, compp, uncompp));
		return;
	}

	mutex_enter(&dl->dl_lock);
	*usedp = dl->dl_phys->dl_used;
	*compp = dl->dl_phys->dl_comp;
	*uncompp = dl->dl_phys->dl_uncomp;
	mutex_exit(&dl->dl_lock);
}
/*
 * Return the space used in the range (mintxg, maxtxg].
 * Includes maxtxg, does not include mintxg.
 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
 * UINT64_MAX).
 */
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	dsl_deadlist_cache_entry_t *dlce;
	dsl_deadlist_cache_entry_t dlce_tofind;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space_range(&dl->dl_bpobj,
		    mintxg, maxtxg, usedp, compp, uncompp));
		return;
	}

	*usedp = *compp = *uncompp = 0;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_cache(dl);
	dlce_tofind.dlce_mintxg = mintxg;
	dlce = avl_find(&dl->dl_cache, &dlce_tofind, &where);

	/*
	 * If this mintxg doesn't exist, it may be an empty_bpobj which
	 * is omitted from the sparse tree.  Start at the next non-empty
	 * entry.
	 */
	if (dlce == NULL)
		dlce = avl_nearest(&dl->dl_cache, where, AVL_AFTER);

	for (; dlce && dlce->dlce_mintxg < maxtxg;
	    dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		*usedp += dlce->dlce_bytes;
		*compp += dlce->dlce_comp;
		*uncompp += dlce->dlce_uncomp;
	}

	mutex_exit(&dl->dl_lock);
}
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;
	uint64_t used, comp, uncomp;
	bpobj_t bpo;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
	VERIFY0(bpobj_space(&bpo, &used, &comp, &uncomp));
	bpobj_close(&bpo);

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used += used;
	dl->dl_phys->dl_comp += comp;
	dl->dl_phys->dl_uncomp += uncomp;

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_enqueue_subobj(dl, dle, obj, tx);
}
/*
 * Prefetch metadata required for dsl_deadlist_insert_bpobj().
 */
static void
dsl_deadlist_prefetch_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_prefetch_subobj(dl, dle, obj);
}
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, bp_freed, tx);
	return (0);
}
/*
 * Merge the deadlist pointed to by 'obj' into dl.  obj will be left as
 * an empty deadlist.
 */
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
	zap_cursor_t zc, pzc;
	zap_attribute_t *za, *pza;
	dmu_buf_t *bonus;
	dsl_deadlist_phys_t *dlp;
	dmu_object_info_t doi;
	int error, perror, i;

	VERIFY0(dmu_object_info(dl->dl_os, obj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
		VERIFY0(bpobj_iterate(&bpo, dsl_deadlist_insert_cb, dl, tx));
		bpobj_close(&bpo);
		return;
	}

	za = zap_attribute_alloc();
	pza = zap_attribute_alloc();

	mutex_enter(&dl->dl_lock);
	/*
	 * Prefetch up to 128 deadlists first and then more as we progress.
	 * The limit is a balance between ARC use and diminishing returns.
	 */
	for (zap_cursor_init(&pzc, dl->dl_os, obj), i = 0;
	    (perror = zap_cursor_retrieve(&pzc, pza)) == 0 && i < 128;
	    zap_cursor_advance(&pzc), i++) {
		dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
		    zfs_strtonum(pza->za_name, NULL));
	}
	for (zap_cursor_init(&zc, dl->dl_os, obj);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_insert_bpobj(dl, za->za_first_integer,
		    zfs_strtonum(za->za_name, NULL), tx);
		VERIFY0(zap_remove(dl->dl_os, obj, za->za_name, tx));
		if (perror == 0) {
			dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
			    zfs_strtonum(pza->za_name, NULL));
			zap_cursor_advance(&pzc);
			perror = zap_cursor_retrieve(&pzc, pza);
		}
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_cursor_fini(&pzc);

	VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
	dlp = bonus->db_data;
	dmu_buf_will_dirty(bonus, tx);
	memset(dlp, 0, sizeof (*dlp));
	dmu_buf_rele(bonus, FTAG);
	mutex_exit(&dl->dl_lock);

	zap_attribute_free(za);
	zap_attribute_free(pza);
}
/*
 * Remove entries on dl that are born > mintxg, and put them on the bpobj.
 */
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *pdle;
	avl_index_t where;
	int i;

	ASSERT(!dl->dl_oldfmt);

	mutex_enter(&dl->dl_lock);
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
	/*
	 * Prefetch up to 128 deadlists first and then more as we progress.
	 * The limit is a balance between ARC use and diminishing returns.
	 */
	for (pdle = dle, i = 0; pdle && i < 128; i++) {
		bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
		pdle = AVL_NEXT(&dl->dl_tree, pdle);
	}
	while (dle) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_entry_t *dle_next;

		bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
		if (pdle) {
			bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
			pdle = AVL_NEXT(&dl->dl_tree, pdle);
		}

		VERIFY0(bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));
		ASSERT3U(dl->dl_phys->dl_used, >=, used);
		ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
		ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
		dl->dl_phys->dl_used -= used;
		dl->dl_phys->dl_comp -= comp;
		dl->dl_phys->dl_uncomp -= uncomp;

		VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, tx));

		dle_next = AVL_NEXT(&dl->dl_tree, dle);
		avl_remove(&dl->dl_tree, dle);
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
		dle = dle_next;
	}
	mutex_exit(&dl->dl_lock);
}
typedef struct livelist_entry {
	blkptr_t le_bp;
	uint32_t le_refcnt;
	avl_node_t le_node;
} livelist_entry_t;
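
/*
 * Livelist entries are keyed on DVA[0]: the (vdev, offset) pair uniquely
 * identifies a block's location on disk, so a FREE and its matching
 * ALLOC compare equal in the AVL tree.
 */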
static int
livelist_compare(const void *larg, const void *rarg)
{
	const blkptr_t *l = &((livelist_entry_t *)larg)->le_bp;
	const blkptr_t *r = &((livelist_entry_t *)rarg)->le_bp;

	/* Sort them according to dva[0] */
	uint64_t l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
	uint64_t r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);

	if (l_dva0_vdev != r_dva0_vdev)
		return (TREE_CMP(l_dva0_vdev, r_dva0_vdev));

	/* if vdevs are equal, sort by offsets. */
	uint64_t l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
	uint64_t r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
	return (TREE_CMP(l_dva0_offset, r_dva0_offset));
}
struct livelist_iter_arg {
	avl_tree_t *avl;
	bplist_t *to_free;
	zthr_t *t;
};
/*
 * Expects an AVL tree which is incrementally filled with FREE blkptrs
 * and used to match up ALLOC/FREE pairs. ALLOC'd blkptrs without a
 * corresponding FREE are stored in the supplied bplist.
 *
 * Note that multiple FREE and ALLOC entries for the same blkptr may be
 * encountered when dedup or block cloning is involved.  For this reason we
 * keep a refcount for all the FREE entries of each blkptr and ensure that
 * each of those FREE entries has a corresponding ALLOC preceding it.
 */
static int
dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	struct livelist_iter_arg *lia = arg;
	avl_tree_t *avl = lia->avl;
	bplist_t *to_free = lia->to_free;
	zthr_t *t = lia->t;
	ASSERT(tx == NULL);

	if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
		return (SET_ERROR(EINTR));

	livelist_entry_t node;
	node.le_bp = *bp;
	livelist_entry_t *found = avl_find(avl, &node, NULL);
	if (found) {
		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(&found->le_bp));
		ASSERT3U(BP_GET_CHECKSUM(bp), ==,
		    BP_GET_CHECKSUM(&found->le_bp));
		ASSERT3U(BP_GET_BIRTH(bp), ==, BP_GET_BIRTH(&found->le_bp));
	}
	if (bp_freed) {
		if (found == NULL) {
			/* first free entry for this blkptr */
			livelist_entry_t *e =
			    kmem_alloc(sizeof (livelist_entry_t), KM_SLEEP);
			e->le_bp = *bp;
			e->le_refcnt = 1;
			avl_add(avl, e);
		} else {
			/*
			 * Deduped or cloned block free.  We could assert D bit
			 * for dedup, but there is no such one for cloning.
			 */
			ASSERT3U(found->le_refcnt + 1, >, found->le_refcnt);
			found->le_refcnt++;
		}
	} else {
		if (found == NULL) {
			/* block is currently marked as allocated */
			bplist_append(to_free, bp);
		} else {
			/* alloc matches a free entry */
			ASSERT3U(found->le_refcnt, !=, 0);
			found->le_refcnt--;
			if (found->le_refcnt == 0) {
				/* all tracked free pairs have been matched */
				avl_remove(avl, found);
				kmem_free(found, sizeof (livelist_entry_t));
			}
		}
	}
	return (0);
}
/*
 * Accepts a bpobj and a bplist. Will insert into the bplist the blkptrs
 * which have an ALLOC entry but no matching FREE.
 */
int
dsl_process_sub_livelist(bpobj_t *bpobj, bplist_t *to_free, zthr_t *t,
    uint64_t *size)
{
	avl_tree_t avl;
	avl_create(&avl, livelist_compare, sizeof (livelist_entry_t),
	    offsetof(livelist_entry_t, le_node));

	/* process the sublist */
	struct livelist_iter_arg arg = {
	    .avl = &avl,
	    .to_free = to_free,
	    .t = t
	};
	int err = bpobj_iterate_nofree(bpobj, dsl_livelist_iterate, &arg, size);
	VERIFY(err != 0 || avl_numnodes(&avl) == 0);

	void *cookie = NULL;
	livelist_entry_t *le = NULL;
	while ((le = avl_destroy_nodes(&avl, &cookie)) != NULL) {
		kmem_free(le, sizeof (livelist_entry_t));
	}
	avl_destroy(&avl);
	return (err);
}
ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, max_entries, U64, ZMOD_RW,
	"Size to start the next sub-livelist in a livelist");

ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, min_percent_shared, INT, ZMOD_RW,
	"Threshold at which livelist is disabled");