/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
/*
 * Deadlist concurrency:
 *
 * Deadlists can only be modified from the syncing thread.
 *
 * Except for dsl_deadlist_insert(), it can only be modified with the
 * dp_config_rwlock held with RW_WRITER.
 *
 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
 * be called concurrently, from open context, with the dp_config_rwlock held
 * with RW_READER.
 *
 * Therefore, we only need to provide locking between dsl_deadlist_insert() and
 * the accessors, protecting:
 *     dl_phys->dl_used,comp,uncomp
 *     and protecting the dl_tree from being loaded.
 * The locking is provided by dl_lock.  Note that the bpobj_t
 * provides its own locking, and dl_oldfmt is immutable.
 */
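/*
 * Illustrative sketch (not part of this file's logic): under the rules
 * above, an open-context reader needs only dl_lock, which the accessor
 * takes internally, to read the cached space totals:
 *
 *	uint64_t used, comp, uncomp;
 *	dsl_deadlist_space(dl, &used, &comp, &uncomp);
 */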
/*
 * Livelists use the same 'deadlist_t' struct as deadlists and are also used
 * to track blkptrs over the lifetime of a dataset. Livelists, however, belong
 * to clones and track the blkptrs that are clone-specific (were born after
 * the clone's creation). The exception is embedded block pointers, which are
 * not included in livelists because they do not need to be freed.
 *
 * When it comes time to delete the clone, the livelist provides a quick
 * reference as to what needs to be freed. For this reason, livelists also
 * track when clone-specific blkptrs are freed before deletion to prevent
 * double frees. Each blkptr in a livelist is marked as a FREE or an ALLOC,
 * and the deletion algorithm iterates backwards over the livelist, matching
 * FREE/ALLOC pairs and then freeing those ALLOCs which remain. Livelists
 * are also updated in the case when blkptrs are remapped: the old version
 * of the blkptr is cancelled out with a FREE and the new version is tracked
 * instead.
 *
 * To bound the amount of memory required for deletion, livelists over a
 * certain size are spread over multiple entries. Entries are grouped by
 * birth txg so we can be sure the ALLOC/FREE pair for a given blkptr will
 * be in the same entry. This allows us to delete livelists incrementally
 * over multiple syncs, one entry at a time.
 *
 * During the lifetime of the clone, livelists can get extremely large.
 * Their size is managed by periodic condensing (preemptively cancelling out
 * FREE/ALLOC pairs). Livelists are disabled when a clone is promoted or when
 * the shared space between the clone and its origin is so small that it
 * doesn't make sense to use livelists anymore.
 */
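/*
 * Illustrative sketch of the FREE/ALLOC pairing described above
 * (simplified; dsl_livelist_iterate() at the bottom of this file does
 * the real matching): walking backwards, a FREE of blkptr X is recorded
 * first and cancels the matching ALLOC of X seen later in the walk; any
 * ALLOC left unmatched at the end is space the clone still has to free.
 */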
/*
 * The threshold sublist size at which we create a new sub-livelist for the
 * next txg. However, since blkptrs of the same transaction group must be in
 * the same sub-list, the actual sublist size may exceed this. When picking the
 * size we had to balance the fact that larger sublists mean fewer sublists
 * (decreasing the cost of insertion) against the consideration that sublists
 * will be loaded into memory and shouldn't take up an inordinate amount of
 * space. We settled on ~500000 entries, corresponding to roughly 128M.
 */
uint64_t zfs_livelist_max_entries = 500000;
/*
 * We can approximate how much of a performance gain a livelist will give us
 * based on the percentage of blocks shared between the clone and its origin.
 * 0 percent shared means that the clone has completely diverged and that the
 * old method is maximally effective: every read from the block tree will
 * result in lots of frees. Livelists give us gains when they track blocks
 * scattered across the tree, when one read in the old method might only
 * result in a few frees. Once the clone has been overwritten enough,
 * writes are no longer sparse and we'll no longer get much of a benefit from
 * tracking them with a livelist. We chose a lower limit of 75 percent shared
 * (25 percent overwritten). This means that 1/4 of all block pointers will be
 * freed (e.g. each read frees 256, out of a max of 1024), so we expect
 * livelists to make deletion 4x faster. Once the amount of shared space drops
 * below this threshold, the clone will revert to the old deletion method.
 */
int zfs_livelist_min_percent_shared = 75;
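/*
 * Illustrative sketch (assumed helper names, not APIs in this file):
 * the decision above reduces to a percentage check against this tunable:
 *
 *	uint64_t pct = 100 * shared_bytes / clone_total_bytes;
 *	if (pct < zfs_livelist_min_percent_shared)
 *		revert_to_old_deletion_method(clone);
 */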
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_entry_t *dle1 = arg1;
	const dsl_deadlist_entry_t *dle2 = arg2;

	return (TREE_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
}
static int
dsl_deadlist_cache_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_cache_entry_t *dlce1 = arg1;
	const dsl_deadlist_cache_entry_t *dlce2 = arg2;

	return (TREE_CMP(dlce1->dlce_mintxg, dlce2->dlce_mintxg));
}
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache) {
		/*
		 * After loading the tree, the caller may modify the tree,
		 * e.g. to add or remove nodes, or to make a node no longer
		 * refer to the empty_bpobj.  These changes would make the
		 * dl_cache incorrect.  Therefore we discard the cache here,
		 * so that it can't become incorrect.
		 */
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL)
			kmem_free(dlce, sizeof (*dlce));
		avl_destroy(&dl->dl_cache);
		dl->dl_havecache = B_FALSE;
	}
	if (dl->dl_havetree)
		return;

	avl_create(&dl->dl_tree, dsl_deadlist_compare,
	    sizeof (dsl_deadlist_entry_t),
	    offsetof(dsl_deadlist_entry_t, dle_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
		dle->dle_mintxg = zfs_strtonum(za.za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dle->dle_bpobj.bpo_object = za.za_first_integer;
		dmu_prefetch(dl->dl_os, dle->dle_bpobj.bpo_object,
		    0, 0, 0, ZIO_PRIORITY_SYNC_READ);

		avl_add(&dl->dl_tree, dle);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);

	for (dsl_deadlist_entry_t *dle = avl_first(&dl->dl_tree);
	    dle != NULL; dle = AVL_NEXT(&dl->dl_tree, dle)) {
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os,
		    dle->dle_bpobj.bpo_object));
	}
	dl->dl_havetree = B_TRUE;
}
/*
 * Load only the non-empty bpobj's into the dl_cache.  The cache is an analog
 * of the dl_tree, but contains only non-empty_bpobj nodes from the ZAP.  It
 * is used only for gathering space statistics.  The dl_cache has two
 * advantages over the dl_tree:
 *
 * 1. Loading the dl_cache is ~5x faster than loading the dl_tree (if it's
 * mostly empty_bpobj's), due to less CPU overhead to open the empty_bpobj
 * many times and to inquire about its (zero) space stats many times.
 *
 * 2. The dl_cache uses less memory than the dl_tree.  We only need to load
 * the dl_tree of snapshots when deleting a snapshot, after which we free the
 * dl_tree with dsl_deadlist_discard_tree().
 */
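/*
 * Example use (sketch): dsl_deadlist_space_range() below is the main
 * consumer of the cache; it sums the precomputed per-entry stats
 * instead of opening each bpobj:
 *
 *	uint64_t used, comp, uncomp;
 *	dsl_deadlist_space_range(dl, mintxg, maxtxg, &used, &comp, &uncomp);
 */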
static void
dsl_deadlist_load_cache(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache)
		return;

	uint64_t empty_bpobj = dmu_objset_pool(dl->dl_os)->dp_empty_bpobj;

	avl_create(&dl->dl_cache, dsl_deadlist_cache_compare,
	    sizeof (dsl_deadlist_cache_entry_t),
	    offsetof(dsl_deadlist_cache_entry_t, dlce_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if (za.za_first_integer == empty_bpobj)
			continue;
		dsl_deadlist_cache_entry_t *dlce =
		    kmem_zalloc(sizeof (*dlce), KM_SLEEP);
		dlce->dlce_mintxg = zfs_strtonum(za.za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dlce->dlce_bpobj = za.za_first_integer;
		dmu_prefetch(dl->dl_os, dlce->dlce_bpobj,
		    0, 0, 0, ZIO_PRIORITY_SYNC_READ);
		avl_add(&dl->dl_cache, dlce);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);

	for (dsl_deadlist_cache_entry_t *dlce = avl_first(&dl->dl_cache);
	    dlce != NULL; dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, dlce->dlce_bpobj));

		VERIFY0(bpobj_space(&bpo,
		    &dlce->dlce_bytes, &dlce->dlce_comp, &dlce->dlce_uncomp));
		bpobj_close(&bpo);
	}
	dl->dl_havecache = B_TRUE;
}
/*
 * Discard the tree to save memory.
 */
void
dsl_deadlist_discard_tree(dsl_deadlist_t *dl)
{
	mutex_enter(&dl->dl_lock);

	if (!dl->dl_havetree) {
		mutex_exit(&dl->dl_lock);
		return;
	}
	dsl_deadlist_entry_t *dle;
	void *cookie = NULL;
	while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) {
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
	}
	avl_destroy(&dl->dl_tree);

	dl->dl_havetree = B_FALSE;
	mutex_exit(&dl->dl_lock);
}
void
dsl_deadlist_iterate(dsl_deadlist_t *dl, deadlist_iter_t func, void *args)
{
	dsl_deadlist_entry_t *dle;

	ASSERT(dsl_deadlist_is_open(dl));

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	mutex_exit(&dl->dl_lock);
	for (dle = avl_first(&dl->dl_tree); dle != NULL;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		if (func(args, dle) != 0)
			break;
	}
}
void
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;

	ASSERT(!dsl_deadlist_is_open(dl));

	mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
	dl->dl_os = os;
	dl->dl_object = object;
	VERIFY0(dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
	dmu_object_info_from_db(dl->dl_dbuf, &doi);
	if (doi.doi_type == DMU_OT_BPOBJ) {
		dmu_buf_rele(dl->dl_dbuf, dl);
		dl->dl_dbuf = NULL;
		dl->dl_oldfmt = B_TRUE;
		VERIFY0(bpobj_open(&dl->dl_bpobj, os, object));
		return;
	}

	dl->dl_oldfmt = B_FALSE;
	dl->dl_phys = dl->dl_dbuf->db_data;
	dl->dl_havetree = B_FALSE;
	dl->dl_havecache = B_FALSE;
}
boolean_t
dsl_deadlist_is_open(dsl_deadlist_t *dl)
{
	return (dl->dl_os != NULL);
}
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
	ASSERT(dsl_deadlist_is_open(dl));
	mutex_destroy(&dl->dl_lock);

	if (dl->dl_oldfmt) {
		dl->dl_oldfmt = B_FALSE;
		bpobj_close(&dl->dl_bpobj);
		dl->dl_os = NULL;
		dl->dl_object = 0;
		return;
	}

	if (dl->dl_havetree) {
		dsl_deadlist_entry_t *dle;
		void *cookie = NULL;
		while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
		    != NULL) {
			bpobj_close(&dle->dle_bpobj);
			kmem_free(dle, sizeof (*dle));
		}
		avl_destroy(&dl->dl_tree);
	}
	if (dl->dl_havecache) {
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL) {
			kmem_free(dlce, sizeof (*dlce));
		}
		avl_destroy(&dl->dl_cache);
	}
	dmu_buf_rele(dl->dl_dbuf, dl);
	dl->dl_dbuf = NULL;
	dl->dl_phys = NULL;
	dl->dl_os = NULL;
	dl->dl_object = 0;
}
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
	return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
	    sizeof (dsl_deadlist_phys_t), tx));
}
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
	dmu_object_info_t doi;
	zap_cursor_t zc;
	zap_attribute_t za;
	int error;

	VERIFY0(dmu_object_info(os, dlobj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_free(os, dlobj, tx);
		return;
	}

	for (zap_cursor_init(&zc, os, dlobj);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t obj = za.za_first_integer;
		if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
			bpobj_decr_empty(os, tx);
		else
			bpobj_free(os, obj, tx);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	VERIFY0(dmu_object_free(os, dlobj, tx));
}
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object ==
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
	bpobj_enqueue(&dle->dle_bpobj, bp, bp_freed, tx);
}
static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
	} else {
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
}
/*
 * Prefetch metadata required for dle_enqueue_subobj().
 */
static void
dle_prefetch_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj)
{
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj)
		bpobj_prefetch_subobj(&dle->dle_bpobj, obj);
}
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		bpobj_enqueue(&dl->dl_bpobj, bp, bp_freed, tx);
		return;
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);

	int sign = bp_freed ? -1 : +1;
	dl->dl_phys->dl_used +=
	    sign * bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
	dl->dl_phys->dl_comp += sign * BP_GET_PSIZE(bp);
	dl->dl_phys->dl_uncomp += sign * BP_GET_UCSIZE(bp);

	dle_tofind.dle_mintxg = bp->blk_birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	else
		dle = AVL_PREV(&dl->dl_tree, dle);

	if (dle == NULL) {
		zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
		    bp, (longlong_t)bp->blk_birth);
		dle = avl_first(&dl->dl_tree);
	}

	ASSERT3P(dle, !=, NULL);
	dle_enqueue(dl, dle, bp, bp_freed, tx);
	mutex_exit(&dl->dl_lock);
}
int
dsl_deadlist_insert_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_FALSE, tx);
	return (0);
}

int
dsl_deadlist_insert_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_TRUE, tx);
	return (0);
}
/*
 * Insert new key in deadlist, which must be > all current entries.
 * mintxg is not inclusive.
 */
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t obj;
	dsl_deadlist_entry_t *dle;

	if (dl->dl_oldfmt)
		return;

	dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
	dle->dle_mintxg = mintxg;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
	avl_add(&dl->dl_tree, dle);

	VERIFY0(zap_add_int_key(dl->dl_os, dl->dl_object,
	    mintxg, obj, tx));
	mutex_exit(&dl->dl_lock);
}
/*
 * Remove this key, merging its entries into the previous key.
 */
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *dle_prev;

	if (dl->dl_oldfmt)
		return;
	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	ASSERT3P(dle, !=, NULL);
	dle_prev = AVL_PREV(&dl->dl_tree, dle);
	ASSERT3P(dle_prev, !=, NULL);

	dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);

	avl_remove(&dl->dl_tree, dle);
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));

	VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
	mutex_exit(&dl->dl_lock);
}
/*
 * Remove a deadlist entry and all of its contents by removing the entry from
 * the deadlist's avl tree, freeing the entry's bpobj and adjusting the
 * deadlist's space accounting accordingly.
 */
void
dsl_deadlist_remove_entry(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t used, comp, uncomp;
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	objset_t *os = dl->dl_os;

	if (dl->dl_oldfmt)
		return;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	VERIFY3P(dle, !=, NULL);

	avl_remove(&dl->dl_tree, dle);
	VERIFY0(zap_remove_int(os, dl->dl_object, mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj) {
		bpobj_decr_empty(os, tx);
	} else {
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	}
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));
	mutex_exit(&dl->dl_lock);
}
/*
 * Clear out the contents of a deadlist_entry by freeing its bpobj,
 * replacing it with an empty bpobj and adjusting the deadlist's
 * space accounting accordingly.
 */
void
dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl,
    dmu_tx_t *tx)
{
	uint64_t new_obj, used, comp, uncomp;
	objset_t *os = dl->dl_os;

	mutex_enter(&dl->dl_lock);
	VERIFY0(zap_remove_int(os, dl->dl_object, dle->dle_mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj)
		bpobj_decr_empty(os, tx);
	else
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	bpobj_close(&dle->dle_bpobj);
	new_obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, os, new_obj));
	VERIFY0(zap_add_int_key(os, dl->dl_object, dle->dle_mintxg,
	    new_obj, tx));
	ASSERT(bpobj_is_empty(&dle->dle_bpobj));
	mutex_exit(&dl->dl_lock);
}
/*
 * Return the first entry in deadlist's avl tree
 */
dsl_deadlist_entry_t *
dsl_deadlist_first(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_first(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}

/*
 * Return the last entry in deadlist's avl tree
 */
dsl_deadlist_entry_t *
dsl_deadlist_last(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_last(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}
/*
 * Walk ds's snapshots to regenerate the ZAP & AVL.
 */
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_t dl = { 0 };
	dsl_pool_t *dp = dmu_objset_pool(os);

	dsl_deadlist_open(&dl, os, dlobj);
	if (dl.dl_oldfmt) {
		dsl_deadlist_close(&dl);
		return;
	}

	while (mrs_obj != 0) {
		dsl_dataset_t *ds;
		VERIFY0(dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
		dsl_deadlist_add_key(&dl,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
		mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_deadlist_close(&dl);
}
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t *dle;
	uint64_t newobj;

	newobj = dsl_deadlist_alloc(dl->dl_os, tx);

	if (dl->dl_oldfmt) {
		dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
		return (newobj);
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t obj;

		if (dle->dle_mintxg >= maxtxg)
			break;

		obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY0(zap_add_int_key(dl->dl_os, newobj,
		    dle->dle_mintxg, obj, tx));
	}
	mutex_exit(&dl->dl_lock);
	return (newobj);
}
void
dsl_deadlist_space(dsl_deadlist_t *dl,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	ASSERT(dsl_deadlist_is_open(dl));
	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space(&dl->dl_bpobj,
		    usedp, compp, uncompp));
		return;
	}

	mutex_enter(&dl->dl_lock);
	*usedp = dl->dl_phys->dl_used;
	*compp = dl->dl_phys->dl_comp;
	*uncompp = dl->dl_phys->dl_uncomp;
	mutex_exit(&dl->dl_lock);
}
/*
 * return space used in the range (mintxg, maxtxg].
 * Includes maxtxg, does not include mintxg.
 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
 * UINT64_MAX).
 */
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	dsl_deadlist_cache_entry_t *dlce;
	dsl_deadlist_cache_entry_t dlce_tofind;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space_range(&dl->dl_bpobj,
		    mintxg, maxtxg, usedp, compp, uncompp));
		return;
	}

	*usedp = *compp = *uncompp = 0;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_cache(dl);
	dlce_tofind.dlce_mintxg = mintxg;
	dlce = avl_find(&dl->dl_cache, &dlce_tofind, &where);

	/*
	 * If this mintxg doesn't exist, it may be an empty_bpobj which
	 * is omitted from the sparse tree.  Start at the next non-empty
	 * entry.
	 */
	if (dlce == NULL)
		dlce = avl_nearest(&dl->dl_cache, where, AVL_AFTER);

	for (; dlce && dlce->dlce_mintxg < maxtxg;
	    dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		*usedp += dlce->dlce_bytes;
		*compp += dlce->dlce_comp;
		*uncompp += dlce->dlce_uncomp;
	}

	mutex_exit(&dl->dl_lock);
}
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;
	uint64_t used, comp, uncomp;
	bpobj_t bpo;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
	VERIFY0(bpobj_space(&bpo, &used, &comp, &uncomp));
	bpobj_close(&bpo);

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used += used;
	dl->dl_phys->dl_comp += comp;
	dl->dl_phys->dl_uncomp += uncomp;

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_enqueue_subobj(dl, dle, obj, tx);
}
/*
 * Prefetch metadata required for dsl_deadlist_insert_bpobj().
 */
static void
dsl_deadlist_prefetch_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_prefetch_subobj(dl, dle, obj);
}
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, bp_freed, tx);
	return (0);
}
/*
 * Merge the deadlist pointed to by 'obj' into dl.  obj will be left as
 * an empty deadlist.
 */
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
	zap_cursor_t zc, pzc;
	zap_attribute_t *za, *pza;
	dmu_buf_t *bonus;
	dsl_deadlist_phys_t *dlp;
	dmu_object_info_t doi;
	int error, perror, i;

	VERIFY0(dmu_object_info(dl->dl_os, obj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
		VERIFY0(bpobj_iterate(&bpo, dsl_deadlist_insert_cb, dl, tx));
		bpobj_close(&bpo);
		return;
	}

	za = kmem_alloc(sizeof (*za), KM_SLEEP);
	pza = kmem_alloc(sizeof (*pza), KM_SLEEP);

	mutex_enter(&dl->dl_lock);
	/*
	 * Prefetch up to 128 deadlists first and then more as we progress.
	 * The limit is a balance between ARC use and diminishing returns.
	 */
	for (zap_cursor_init(&pzc, dl->dl_os, obj), i = 0;
	    (perror = zap_cursor_retrieve(&pzc, pza)) == 0 && i < 128;
	    zap_cursor_advance(&pzc), i++) {
		dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
		    zfs_strtonum(pza->za_name, NULL));
	}
	for (zap_cursor_init(&zc, dl->dl_os, obj);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_insert_bpobj(dl, za->za_first_integer,
		    zfs_strtonum(za->za_name, NULL), tx);
		VERIFY0(zap_remove(dl->dl_os, obj, za->za_name, tx));
		if (perror == 0) {
			dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
			    zfs_strtonum(pza->za_name, NULL));
			zap_cursor_advance(&pzc);
			perror = zap_cursor_retrieve(&pzc, pza);
		}
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_cursor_fini(&pzc);

	VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
	dlp = bonus->db_data;
	dmu_buf_will_dirty(bonus, tx);
	memset(dlp, 0, sizeof (*dlp));
	dmu_buf_rele(bonus, FTAG);
	mutex_exit(&dl->dl_lock);

	kmem_free(za, sizeof (*za));
	kmem_free(pza, sizeof (*pza));
}
/*
 * Remove entries on dl that are born > mintxg, and put them on the bpobj.
 */
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *pdle;
	avl_index_t where;
	int i;

	ASSERT(!dl->dl_oldfmt);

	mutex_enter(&dl->dl_lock);
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
	/*
	 * Prefetch up to 128 deadlists first and then more as we progress.
	 * The limit is a balance between ARC use and diminishing returns.
	 */
	for (pdle = dle, i = 0; pdle && i < 128; i++) {
		bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
		pdle = AVL_NEXT(&dl->dl_tree, pdle);
	}
	while (dle) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_entry_t *dle_next;

		bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
		if (pdle != NULL) {
			bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
			pdle = AVL_NEXT(&dl->dl_tree, pdle);
		}

		VERIFY0(bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));
		ASSERT3U(dl->dl_phys->dl_used, >=, used);
		ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
		ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
		dl->dl_phys->dl_used -= used;
		dl->dl_phys->dl_comp -= comp;
		dl->dl_phys->dl_uncomp -= uncomp;

		VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, tx));

		dle_next = AVL_NEXT(&dl->dl_tree, dle);
		avl_remove(&dl->dl_tree, dle);
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
		dle = dle_next;
	}
	mutex_exit(&dl->dl_lock);
}
typedef struct livelist_entry {
	blkptr_t le_bp;
	uint32_t le_refcnt;
	avl_node_t le_node;
} livelist_entry_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
	const blkptr_t *l = &((livelist_entry_t *)larg)->le_bp;
	const blkptr_t *r = &((livelist_entry_t *)rarg)->le_bp;

	/* Sort them according to dva[0] */
	uint64_t l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
	uint64_t r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);

	if (l_dva0_vdev != r_dva0_vdev)
		return (TREE_CMP(l_dva0_vdev, r_dva0_vdev));

	/* if vdevs are equal, sort by offsets. */
	uint64_t l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
	uint64_t r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
	if (l_dva0_offset == r_dva0_offset)
		ASSERT3U(l->blk_birth, ==, r->blk_birth);
	return (TREE_CMP(l_dva0_offset, r_dva0_offset));
}
struct livelist_iter_arg {
	avl_tree_t *avl;
	bplist_t *to_free;
	zthr_t *t;
};
/*
 * Expects an AVL tree which is incrementally filled with FREE blkptrs
 * and used to match up ALLOC/FREE pairs. ALLOC'd blkptrs without a
 * corresponding FREE are stored in the supplied bplist.
 *
 * Note that multiple FREE and ALLOC entries for the same blkptr may
 * be encountered when dedup is involved. For this reason we keep a
 * refcount for all the FREE entries of each blkptr and ensure that
 * each of those FREE entries has a corresponding ALLOC preceding it.
 */
static int
dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	struct livelist_iter_arg *lia = arg;
	avl_tree_t *avl = lia->avl;
	bplist_t *to_free = lia->to_free;
	zthr_t *t = lia->t;
	ASSERT(tx == NULL);

	if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
		return (SET_ERROR(EINTR));

	livelist_entry_t node;
	node.le_bp = *bp;
	livelist_entry_t *found = avl_find(avl, &node, NULL);
	if (bp_freed) {
		if (found == NULL) {
			/* first free entry for this blkptr */
			livelist_entry_t *e =
			    kmem_alloc(sizeof (livelist_entry_t), KM_SLEEP);
			e->le_bp = *bp;
			e->le_refcnt = 1;
			avl_add(avl, e);
		} else {
			/* dedup block free */
			ASSERT(BP_GET_DEDUP(bp));
			ASSERT3U(BP_GET_CHECKSUM(bp), ==,
			    BP_GET_CHECKSUM(&found->le_bp));
			ASSERT3U(found->le_refcnt + 1, >, found->le_refcnt);
			found->le_refcnt++;
		}
	} else {
		if (found == NULL) {
			/* block is currently marked as allocated */
			bplist_append(to_free, bp);
		} else {
			/* alloc matches a free entry */
			ASSERT3U(found->le_refcnt, !=, 0);
			found->le_refcnt--;
			if (found->le_refcnt == 0) {
				/* all tracked free pairs have been matched */
				avl_remove(avl, found);
				kmem_free(found, sizeof (livelist_entry_t));
			} else {
				/*
				 * This is definitely a deduped blkptr so
				 * let's validate it.
				 */
				ASSERT(BP_GET_DEDUP(bp));
				ASSERT3U(BP_GET_CHECKSUM(bp), ==,
				    BP_GET_CHECKSUM(&found->le_bp));
			}
		}
	}
	return (0);
}
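/*
 * Worked example (illustrative): with dedup in play, a sub-livelist may
 * contain FREE(X), FREE(X), ALLOC(X), ALLOC(X) for one blkptr X. Walking
 * it, the two FREEs build le_refcnt up to 2; each subsequent ALLOC
 * decrements it, and the entry is removed once it hits zero, so X never
 * reaches the to_free bplist.
 */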
/*
 * Accepts a bpobj and a bplist. Will insert into the bplist the blkptrs
 * which have an ALLOC entry but no matching FREE.
 */
int
dsl_process_sub_livelist(bpobj_t *bpobj, bplist_t *to_free, zthr_t *t,
    uint64_t *size)
{
	avl_tree_t avl;
	avl_create(&avl, livelist_compare, sizeof (livelist_entry_t),
	    offsetof(livelist_entry_t, le_node));

	/* process the sublist */
	struct livelist_iter_arg arg = {
	    .avl = &avl,
	    .to_free = to_free,
	    .t = t
	};
	int err = bpobj_iterate_nofree(bpobj, dsl_livelist_iterate, &arg, size);
	VERIFY(err != 0 || avl_numnodes(&avl) == 0);

	void *cookie = NULL;
	livelist_entry_t *le = NULL;
	while ((le = avl_destroy_nodes(&avl, &cookie)) != NULL) {
		kmem_free(le, sizeof (livelist_entry_t));
	}
	avl_destroy(&avl);
	return (err);
}
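/*
 * Example use (sketch, mirroring how the clone-destroy code elsewhere in
 * ZFS consumes dsl_process_sub_livelist(); 'dle' here stands for a
 * sub-livelist entry):
 *
 *	bplist_t to_free;
 *	uint64_t size = 0;
 *	bplist_create(&to_free);
 *	int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
 *	    NULL, &size);
 */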
ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, max_entries, U64, ZMOD_RW,
	"Size to start the next sub-livelist in a livelist");

ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, min_percent_shared, INT, ZMOD_RW,
	"Threshold at which livelist is disabled");