/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
/*
 * Deadlist concurrency:
 *
 * Deadlists can only be modified from the syncing thread.
 *
 * Except for dsl_deadlist_insert(), it can only be modified with the
 * dp_config_rwlock held with RW_WRITER.
 *
 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
 * be called concurrently, from open context, with the dp_config_rwlock held
 * with RW_READER.
 *
 * Therefore, we only need to provide locking between dsl_deadlist_insert()
 * and the accessors, protecting:
 *     dl_phys->dl_used,comp,uncomp
 *     and protecting the dl_tree from being loaded.
 * The locking is provided by dl_lock.  Note that the bpobj_t
 * provides its own locking, and dl_oldfmt is immutable.
 */
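/*
 * Illustrative sketch of the reader-side pattern described above (our
 * annotation, not part of the original comment; assumes a held objset 'os'
 * and an open deadlist 'dl'):
 *
 *	dsl_pool_t *dp = dmu_objset_pool(os);
 *	uint64_t used, comp, uncomp;
 *
 *	dsl_pool_config_enter(dp, FTAG);
 *	dsl_deadlist_space(dl, &used, &comp, &uncomp);
 *	dsl_pool_config_exit(dp, FTAG);
 *
 * dsl_deadlist_space() takes dl_lock internally, which is what makes it
 * safe against a concurrent dsl_deadlist_insert() from syncing context.
 */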
/*
 * Livelist Overview
 * ================
 *
 * Livelists use the same 'deadlist_t' struct as deadlists and are also used
 * to track blkptrs over the lifetime of a dataset. Livelists, however, belong
 * to clones and track the blkptrs that are clone-specific (were born after
 * the clone's creation). The exception is embedded block pointers which are
 * not included in livelists because they do not need to be freed.
 *
 * When it comes time to delete the clone, the livelist provides a quick
 * reference as to what needs to be freed. For this reason, livelists also
 * track when clone-specific blkptrs are freed before deletion to prevent
 * double frees. Each blkptr in a livelist is marked as a FREE or an ALLOC
 * and the deletion algorithm iterates backwards over the livelist, matching
 * FREE/ALLOC pairs and then freeing those ALLOCs which remain. Livelists
 * are also updated in the case when blkptrs are remapped: the old version
 * of the blkptr is cancelled out with a FREE and the new version is tracked
 * instead.
 *
 * To bound the amount of memory required for deletion, livelists over a
 * certain size are spread over multiple entries. Entries are grouped by
 * birth txg so we can be sure the ALLOC/FREE pair for a given blkptr will
 * be in the same entry. This allows us to delete livelists incrementally
 * over multiple syncs, one entry at a time.
 *
 * During the lifetime of the clone, livelists can get extremely large.
 * Their size is managed by periodic condensing (preemptively cancelling out
 * FREE/ALLOC pairs). Livelists are disabled when a clone is promoted or when
 * the shared space between the clone and its origin is so small that it
 * doesn't make sense to use livelists anymore.
 */
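/*
 * Illustrative sketch (our annotation, not from the original comment): a
 * livelist for a blkptr B that was written in the clone, freed, and then
 * rewritten as B' would contain, in time order,
 *
 *	ALLOC(B), FREE(B), ALLOC(B')
 *
 * Iterating backwards at deletion time, ALLOC(B') has no later FREE, so B'
 * is freed; FREE(B) then cancels ALLOC(B), so B is skipped and a double
 * free is avoided.
 */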
/*
 * The threshold sublist size at which we create a new sub-livelist for the
 * next txg. However, since blkptrs of the same transaction group must be in
 * the same sub-list, the actual sublist size may exceed this. When picking
 * the size we had to balance the fact that larger sublists mean fewer
 * sublists (decreasing the cost of insertion) against the consideration that
 * sublists will be loaded into memory and shouldn't take up an inordinate
 * amount of space. We settled on ~500000 entries, corresponding to roughly
 * 128M.
 */
unsigned long zfs_livelist_max_entries = 500000;
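/*
 * Back-of-the-envelope for the 128M figure above (our annotation): each
 * on-disk entry is a blkptr_t of 128 bytes, so 500,000 entries is ~64M of
 * raw block pointers; the "roughly 128M" presumably also accounts for
 * per-entry in-memory overhead (an assumption, not stated in the original
 * comment).
 */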
/*
 * We can approximate how much of a performance gain a livelist will give us
 * based on the percentage of blocks shared between the clone and its origin.
 * 0 percent shared means that the clone has completely diverged and that the
 * old method is maximally effective: every read from the block tree will
 * result in lots of frees. Livelists give us gains when they track blocks
 * scattered across the tree, when one read in the old method might only
 * result in a few frees. Once the clone has been overwritten enough,
 * writes are no longer sparse and we'll no longer get much of a benefit from
 * tracking them with a livelist. We chose a lower limit of 75 percent shared
 * (25 percent overwritten). This means that 1/4 of all block pointers will
 * be freed (e.g. each read frees 256, out of a max of 1024) so we expect
 * livelists to make deletion 4x faster. Once the amount of shared space
 * drops below this threshold, the clone will revert to the old deletion
 * method.
 */
int zfs_livelist_min_percent_shared = 75;
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_entry_t *dle1 = arg1;
	const dsl_deadlist_entry_t *dle2 = arg2;

	return (TREE_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
}

static int
dsl_deadlist_cache_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_cache_entry_t *dlce1 = arg1;
	const dsl_deadlist_cache_entry_t *dlce2 = arg2;

	return (TREE_CMP(dlce1->dlce_mintxg, dlce2->dlce_mintxg));
}
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache) {
		/*
		 * After loading the tree, the caller may modify the tree,
		 * e.g. to add or remove nodes, or to make a node no longer
		 * refer to the empty_bpobj.  These changes would make the
		 * dl_cache incorrect.  Therefore we discard the cache here,
		 * so that it can't become incorrect.
		 */
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL) {
			kmem_free(dlce, sizeof (*dlce));
		}
		avl_destroy(&dl->dl_cache);
		dl->dl_havecache = B_FALSE;
	}
	if (dl->dl_havetree)
		return;

	avl_create(&dl->dl_tree, dsl_deadlist_compare,
	    sizeof (dsl_deadlist_entry_t),
	    offsetof(dsl_deadlist_entry_t, dle_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
		dle->dle_mintxg = zfs_strtonum(za.za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dle->dle_bpobj.bpo_object = za.za_first_integer;
		dmu_prefetch(dl->dl_os, dle->dle_bpobj.bpo_object,
		    0, 0, 0, ZIO_PRIORITY_SYNC_READ);

		avl_add(&dl->dl_tree, dle);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);

	for (dsl_deadlist_entry_t *dle = avl_first(&dl->dl_tree);
	    dle != NULL; dle = AVL_NEXT(&dl->dl_tree, dle)) {
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os,
		    dle->dle_bpobj.bpo_object));
	}
	dl->dl_havetree = B_TRUE;
}
/*
 * Load only the non-empty bpobj's into the dl_cache.  The cache is an analog
 * of the dl_tree, but contains only non-empty_bpobj nodes from the ZAP. It
 * is used only for gathering space statistics.  The dl_cache has two
 * advantages over the dl_tree:
 *
 * 1. Loading the dl_cache is ~5x faster than loading the dl_tree (if it's
 * mostly empty_bpobj's), due to less CPU overhead to open the empty_bpobj
 * many times and to inquire about its (zero) space stats many times.
 *
 * 2. The dl_cache uses less memory than the dl_tree.  We only need to load
 * the dl_tree of snapshots when deleting a snapshot, after which we free the
 * dl_tree with dsl_deadlist_discard_tree().
 */
static void
dsl_deadlist_load_cache(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache)
		return;

	uint64_t empty_bpobj = dmu_objset_pool(dl->dl_os)->dp_empty_bpobj;

	avl_create(&dl->dl_cache, dsl_deadlist_cache_compare,
	    sizeof (dsl_deadlist_cache_entry_t),
	    offsetof(dsl_deadlist_cache_entry_t, dlce_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if (za.za_first_integer == empty_bpobj)
			continue;
		dsl_deadlist_cache_entry_t *dlce =
		    kmem_zalloc(sizeof (*dlce), KM_SLEEP);
		dlce->dlce_mintxg = zfs_strtonum(za.za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dlce->dlce_bpobj = za.za_first_integer;
		dmu_prefetch(dl->dl_os, dlce->dlce_bpobj,
		    0, 0, 0, ZIO_PRIORITY_SYNC_READ);
		avl_add(&dl->dl_cache, dlce);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);

	for (dsl_deadlist_cache_entry_t *dlce = avl_first(&dl->dl_cache);
	    dlce != NULL; dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, dlce->dlce_bpobj));

		VERIFY0(bpobj_space(&bpo,
		    &dlce->dlce_bytes, &dlce->dlce_comp, &dlce->dlce_uncomp));
		bpobj_close(&bpo);
	}
	dl->dl_havecache = B_TRUE;
}
/*
 * Discard the tree to save memory.
 */
void
dsl_deadlist_discard_tree(dsl_deadlist_t *dl)
{
	mutex_enter(&dl->dl_lock);

	if (!dl->dl_havetree) {
		mutex_exit(&dl->dl_lock);
		return;
	}
	dsl_deadlist_entry_t *dle;
	void *cookie = NULL;
	while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) {
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
	}
	avl_destroy(&dl->dl_tree);

	dl->dl_havetree = B_FALSE;
	mutex_exit(&dl->dl_lock);
}
void
dsl_deadlist_iterate(dsl_deadlist_t *dl, deadlist_iter_t func, void *args)
{
	dsl_deadlist_entry_t *dle;

	ASSERT(dsl_deadlist_is_open(dl));

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	mutex_exit(&dl->dl_lock);
	for (dle = avl_first(&dl->dl_tree); dle != NULL;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		if (func(args, dle) != 0)
			break;
	}
}
void
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;

	ASSERT(!dsl_deadlist_is_open(dl));

	mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
	dl->dl_os = os;
	dl->dl_object = object;
	VERIFY0(dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
	dmu_object_info_from_db(dl->dl_dbuf, &doi);
	if (doi.doi_type == DMU_OT_BPOBJ) {
		dmu_buf_rele(dl->dl_dbuf, dl);
		dl->dl_dbuf = NULL;
		dl->dl_oldfmt = B_TRUE;
		VERIFY0(bpobj_open(&dl->dl_bpobj, os, object));
		return;
	}

	dl->dl_oldfmt = B_FALSE;
	dl->dl_phys = dl->dl_dbuf->db_data;
	dl->dl_havetree = B_FALSE;
	dl->dl_havecache = B_FALSE;
}
boolean_t
dsl_deadlist_is_open(dsl_deadlist_t *dl)
{
	return (dl->dl_os != NULL);
}
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
	ASSERT(dsl_deadlist_is_open(dl));
	mutex_destroy(&dl->dl_lock);

	if (dl->dl_oldfmt) {
		dl->dl_oldfmt = B_FALSE;
		bpobj_close(&dl->dl_bpobj);
		dl->dl_os = NULL;
		dl->dl_object = 0;
		return;
	}

	if (dl->dl_havetree) {
		dsl_deadlist_entry_t *dle;
		void *cookie = NULL;
		while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
		    != NULL) {
			bpobj_close(&dle->dle_bpobj);
			kmem_free(dle, sizeof (*dle));
		}
		avl_destroy(&dl->dl_tree);
	}
	if (dl->dl_havecache) {
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL) {
			kmem_free(dlce, sizeof (*dlce));
		}
		avl_destroy(&dl->dl_cache);
	}
	dmu_buf_rele(dl->dl_dbuf, dl);
	dl->dl_dbuf = NULL;
	dl->dl_phys = NULL;
	dl->dl_os = NULL;
	dl->dl_object = 0;
}
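/*
 * Typical lifecycle of the open/close API above (illustrative sketch; 'os'
 * and 'dlobj' stand for a held objset and a valid deadlist object number):
 *
 *	dsl_deadlist_t dl = { 0 };
 *	uint64_t used, comp, uncomp;
 *
 *	dsl_deadlist_open(&dl, os, dlobj);
 *	dsl_deadlist_space(&dl, &used, &comp, &uncomp);
 *	dsl_deadlist_close(&dl);
 */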
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
	return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
	    sizeof (dsl_deadlist_phys_t), tx));
}
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
	dmu_object_info_t doi;
	zap_cursor_t zc;
	zap_attribute_t za;
	int error;

	VERIFY0(dmu_object_info(os, dlobj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_free(os, dlobj, tx);
		return;
	}

	for (zap_cursor_init(&zc, os, dlobj);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t obj = za.za_first_integer;
		if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
			bpobj_decr_empty(os, tx);
		else
			bpobj_free(os, obj, tx);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	VERIFY0(dmu_object_free(os, dlobj, tx));
}
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object ==
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
	bpobj_enqueue(&dle->dle_bpobj, bp, bp_freed, tx);
}
static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
	} else {
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
}
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		bpobj_enqueue(&dl->dl_bpobj, bp, bp_freed, tx);
		return;
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);

	int sign = bp_freed ? -1 : +1;
	dl->dl_phys->dl_used +=
	    sign * bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
	dl->dl_phys->dl_comp += sign * BP_GET_PSIZE(bp);
	dl->dl_phys->dl_uncomp += sign * BP_GET_UCSIZE(bp);

	dle_tofind.dle_mintxg = bp->blk_birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	else
		dle = AVL_PREV(&dl->dl_tree, dle);

	if (dle == NULL) {
		zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
		    bp, (longlong_t)bp->blk_birth);
		dle = avl_first(&dl->dl_tree);
	}

	ASSERT3P(dle, !=, NULL);
	dle_enqueue(dl, dle, bp, bp_freed, tx);
	mutex_exit(&dl->dl_lock);
}
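/*
 * Bucketing example for the lookup above (our annotation): with entry keys
 * (dle_mintxg values) 0, 10 and 20, a bp with blk_birth 15 is filed under
 * the entry keyed 10, and a bp born exactly at txg 20 also lands in the
 * entry keyed 10, since each entry covers births in (dle_mintxg, next key].
 */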
int
dsl_deadlist_insert_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_FALSE, tx);
	return (0);
}

int
dsl_deadlist_insert_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_TRUE, tx);
	return (0);
}
/*
 * Insert new key in deadlist, which must be > all current entries.
 * mintxg is not inclusive.
 */
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t obj;
	dsl_deadlist_entry_t *dle;

	if (dl->dl_oldfmt)
		return;

	dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
	dle->dle_mintxg = mintxg;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
	avl_add(&dl->dl_tree, dle);

	VERIFY0(zap_add_int_key(dl->dl_os, dl->dl_object,
	    mintxg, obj, tx));
	mutex_exit(&dl->dl_lock);
}
/*
 * Remove this key, merging its entries into the previous key.
 */
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *dle_prev;

	if (dl->dl_oldfmt)
		return;
	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	ASSERT3P(dle, !=, NULL);
	dle_prev = AVL_PREV(&dl->dl_tree, dle);

	dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);

	avl_remove(&dl->dl_tree, dle);
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));

	VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
	mutex_exit(&dl->dl_lock);
}
/*
 * Remove a deadlist entry and all of its contents by removing the entry from
 * the deadlist's avl tree, freeing the entry's bpobj and adjusting the
 * deadlist's space accounting accordingly.
 */
void
dsl_deadlist_remove_entry(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t used, comp, uncomp;
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	objset_t *os = dl->dl_os;

	if (dl->dl_oldfmt)
		return;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	VERIFY3P(dle, !=, NULL);

	avl_remove(&dl->dl_tree, dle);
	VERIFY0(zap_remove_int(os, dl->dl_object, mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj) {
		bpobj_decr_empty(os, tx);
	} else {
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	}
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));
	mutex_exit(&dl->dl_lock);
}
/*
 * Clear out the contents of a deadlist_entry by freeing its bpobj,
 * replacing it with an empty bpobj and adjusting the deadlist's
 * space accounting.
 */
void
dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl,
    dmu_tx_t *tx)
{
	uint64_t new_obj, used, comp, uncomp;
	objset_t *os = dl->dl_os;

	mutex_enter(&dl->dl_lock);
	VERIFY0(zap_remove_int(os, dl->dl_object, dle->dle_mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj)
		bpobj_decr_empty(os, tx);
	else
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	bpobj_close(&dle->dle_bpobj);
	new_obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, os, new_obj));
	VERIFY0(zap_add_int_key(os, dl->dl_object, dle->dle_mintxg,
	    new_obj, tx));
	ASSERT(bpobj_is_empty(&dle->dle_bpobj));
	mutex_exit(&dl->dl_lock);
}
/*
 * Return the first entry in deadlist's avl tree
 */
dsl_deadlist_entry_t *
dsl_deadlist_first(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_first(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}

/*
 * Return the last entry in deadlist's avl tree
 */
dsl_deadlist_entry_t *
dsl_deadlist_last(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_last(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}
/*
 * Walk ds's snapshots to regenerate the ZAP & AVL.
 */
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_t dl = { 0 };
	dsl_pool_t *dp = dmu_objset_pool(os);

	dsl_deadlist_open(&dl, os, dlobj);
	if (dl.dl_oldfmt) {
		dsl_deadlist_close(&dl);
		return;
	}

	while (mrs_obj != 0) {
		dsl_dataset_t *ds;
		VERIFY0(dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
		dsl_deadlist_add_key(&dl,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
		mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_deadlist_close(&dl);
}
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t *dle;
	uint64_t newobj;

	newobj = dsl_deadlist_alloc(dl->dl_os, tx);

	if (dl->dl_oldfmt) {
		dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
		return (newobj);
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t obj;

		if (dle->dle_mintxg >= maxtxg)
			break;

		obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY0(zap_add_int_key(dl->dl_os, newobj,
		    dle->dle_mintxg, obj, tx));
	}
	mutex_exit(&dl->dl_lock);
	return (newobj);
}
void
dsl_deadlist_space(dsl_deadlist_t *dl,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	ASSERT(dsl_deadlist_is_open(dl));
	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space(&dl->dl_bpobj,
		    usedp, compp, uncompp));
		return;
	}

	mutex_enter(&dl->dl_lock);
	*usedp = dl->dl_phys->dl_used;
	*compp = dl->dl_phys->dl_comp;
	*uncompp = dl->dl_phys->dl_uncomp;
	mutex_exit(&dl->dl_lock);
}
/*
 * return space used in the range (mintxg, maxtxg].
 * Includes maxtxg, does not include mintxg.
 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
 * UINT64_MAX).
 */
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	dsl_deadlist_cache_entry_t *dlce;
	dsl_deadlist_cache_entry_t dlce_tofind;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space_range(&dl->dl_bpobj,
		    mintxg, maxtxg, usedp, compp, uncompp));
		return;
	}

	*usedp = *compp = *uncompp = 0;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_cache(dl);
	dlce_tofind.dlce_mintxg = mintxg;
	dlce = avl_find(&dl->dl_cache, &dlce_tofind, &where);

	/*
	 * If this mintxg doesn't exist, it may be an empty_bpobj which
	 * is omitted from the sparse tree.  Start at the next non-empty
	 * entry.
	 */
	if (dlce == NULL)
		dlce = avl_nearest(&dl->dl_cache, where, AVL_AFTER);

	/* Advance within the cache tree, not the dl_tree. */
	for (; dlce && dlce->dlce_mintxg < maxtxg;
	    dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		*usedp += dlce->dlce_bytes;
		*compp += dlce->dlce_comp;
		*uncompp += dlce->dlce_uncomp;
	}

	mutex_exit(&dl->dl_lock);
}
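/*
 * Range-semantics example (our annotation): with entry keys 0, 10, 20 and
 * 30, dsl_deadlist_space_range(dl, 10, 30, ...) sums the entries keyed 10
 * and 20, i.e. all blocks born in (10, 30]; blocks born after txg 30 are
 * excluded.
 */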
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;
	uint64_t used, comp, uncomp;
	bpobj_t bpo;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
	VERIFY0(bpobj_space(&bpo, &used, &comp, &uncomp));
	bpobj_close(&bpo);

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used += used;
	dl->dl_phys->dl_comp += comp;
	dl->dl_phys->dl_uncomp += uncomp;

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_enqueue_subobj(dl, dle, obj, tx);
}
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, bp_freed, tx);
	return (0);
}
/*
 * Merge the deadlist pointed to by 'obj' into dl.  obj will be left as
 * an empty deadlist.
 */
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	int error;
	dmu_buf_t *bonus;
	dsl_deadlist_phys_t *dlp;
	dmu_object_info_t doi;

	VERIFY0(dmu_object_info(dl->dl_os, obj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
		VERIFY0(bpobj_iterate(&bpo, dsl_deadlist_insert_cb, dl, tx));
		bpobj_close(&bpo);
		return;
	}

	mutex_enter(&dl->dl_lock);
	for (zap_cursor_init(&zc, dl->dl_os, obj);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t mintxg = zfs_strtonum(za.za_name, NULL);
		dsl_deadlist_insert_bpobj(dl, za.za_first_integer, mintxg, tx);
		VERIFY0(zap_remove_int(dl->dl_os, obj, mintxg, tx));
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);

	VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
	dlp = bonus->db_data;
	dmu_buf_will_dirty(bonus, tx);
	bzero(dlp, sizeof (*dlp));
	dmu_buf_rele(bonus, FTAG);
	mutex_exit(&dl->dl_lock);
}
/*
 * Remove entries on dl that are born > mintxg, and put them on the bpobj.
 */
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	ASSERT(!dl->dl_oldfmt);

	mutex_enter(&dl->dl_lock);
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
	while (dle) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_entry_t *dle_next;

		bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);

		VERIFY0(bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));
		ASSERT3U(dl->dl_phys->dl_used, >=, used);
		ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
		ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
		dl->dl_phys->dl_used -= used;
		dl->dl_phys->dl_comp -= comp;
		dl->dl_phys->dl_uncomp -= uncomp;

		VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, tx));

		dle_next = AVL_NEXT(&dl->dl_tree, dle);
		avl_remove(&dl->dl_tree, dle);
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
		dle = dle_next;
	}
	mutex_exit(&dl->dl_lock);
}
typedef struct livelist_entry {
	blkptr_t le_bp;
	uint32_t le_refcnt;
	avl_node_t le_node;
} livelist_entry_t;

static int
livelist_compare(const void *larg, const void *rarg)
{
	const blkptr_t *l = &((livelist_entry_t *)larg)->le_bp;
	const blkptr_t *r = &((livelist_entry_t *)rarg)->le_bp;

	/* Sort them according to dva[0] */
	uint64_t l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
	uint64_t r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);

	if (l_dva0_vdev != r_dva0_vdev)
		return (TREE_CMP(l_dva0_vdev, r_dva0_vdev));

	/* if vdevs are equal, sort by offsets. */
	uint64_t l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
	uint64_t r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
	if (l_dva0_offset == r_dva0_offset)
		ASSERT3U(l->blk_birth, ==, r->blk_birth);
	return (TREE_CMP(l_dva0_offset, r_dva0_offset));
}
struct livelist_iter_arg {
	avl_tree_t *avl;
	bplist_t *to_free;
	zthr_t *t;
};

/*
 * Expects an AVL tree which is incrementally filled with FREE blkptrs
 * and used to match up ALLOC/FREE pairs. ALLOC'd blkptrs without a
 * corresponding FREE are stored in the supplied bplist.
 *
 * Note that multiple FREE and ALLOC entries for the same blkptr may
 * be encountered when dedup is involved. For this reason we keep a
 * refcount for all the FREE entries of each blkptr and ensure that
 * each of those FREE entries has a corresponding ALLOC preceding it.
 */
static int
dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	struct livelist_iter_arg *lia = arg;
	avl_tree_t *avl = lia->avl;
	bplist_t *to_free = lia->to_free;
	zthr_t *t = lia->t;

	if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
		return (SET_ERROR(EINTR));

	livelist_entry_t node;
	node.le_bp = *bp;
	livelist_entry_t *found = avl_find(avl, &node, NULL);
	if (bp_freed) {
		if (found == NULL) {
			/* first free entry for this blkptr */
			livelist_entry_t *e =
			    kmem_alloc(sizeof (livelist_entry_t), KM_SLEEP);
			e->le_bp = *bp;
			e->le_refcnt = 1;
			avl_add(avl, e);
		} else {
			/* dedup block free */
			ASSERT(BP_GET_DEDUP(bp));
			ASSERT3U(BP_GET_CHECKSUM(bp), ==,
			    BP_GET_CHECKSUM(&found->le_bp));
			ASSERT3U(found->le_refcnt + 1, >, found->le_refcnt);
			found->le_refcnt++;
		}
	} else {
		if (found == NULL) {
			/* block is currently marked as allocated */
			bplist_append(to_free, bp);
		} else {
			/* alloc matches a free entry */
			ASSERT3U(found->le_refcnt, !=, 0);
			found->le_refcnt--;
			if (found->le_refcnt == 0) {
				/* all tracked free pairs have been matched */
				avl_remove(avl, found);
				kmem_free(found, sizeof (livelist_entry_t));
			} else {
				/*
				 * This is definitely a deduped blkptr so
				 * let's validate it.
				 */
				ASSERT(BP_GET_DEDUP(bp));
				ASSERT3U(BP_GET_CHECKSUM(bp), ==,
				    BP_GET_CHECKSUM(&found->le_bp));
			}
		}
	}
	return (0);
}
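/*
 * Worked example of the refcounting above (our annotation): for a deduped
 * blkptr B with two references, iterating the sublist backwards might
 * yield FREE(B), FREE(B), ALLOC(B), ALLOC(B).  The FREEs raise le_refcnt
 * to 2, each subsequent ALLOC decrements it, and the node is removed once
 * it reaches zero, so B is never appended to the to_free bplist.
 */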
/*
 * Accepts a bpobj and a bplist. Will insert into the bplist the blkptrs
 * which have an ALLOC entry but no matching FREE
 */
int
dsl_process_sub_livelist(bpobj_t *bpobj, bplist_t *to_free, zthr_t *t,
    uint64_t *size)
{
	avl_tree_t avl;
	avl_create(&avl, livelist_compare, sizeof (livelist_entry_t),
	    offsetof(livelist_entry_t, le_node));

	/* process the sublist */
	struct livelist_iter_arg arg = {
	    .avl = &avl,
	    .to_free = to_free,
	    .t = t
	};
	int err = bpobj_iterate_nofree(bpobj, dsl_livelist_iterate, &arg, size);

	VERIFY0(avl_numnodes(&avl));
	avl_destroy(&avl);
	return (err);
}
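/*
 * Usage sketch for dsl_process_sub_livelist() (illustrative only; 'sublist'
 * is assumed to be an already-opened sub-livelist bpobj):
 *
 *	bplist_t to_free;
 *	uint64_t size;
 *
 *	bplist_create(&to_free);
 *	if (dsl_process_sub_livelist(&sublist, &to_free, NULL, &size) == 0) {
 *		(blocks with an unmatched ALLOC are now in 'to_free')
 *	}
 *	bplist_destroy(&to_free);
 */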
ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, max_entries, ULONG, ZMOD_RW,
	"Size to start the next sub-livelist in a livelist");

ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, min_percent_shared, INT, ZMOD_RW,
	"Threshold at which livelist is disabled");