/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
/*
 * Deadlist concurrency:
 *
 * Deadlists can only be modified from the syncing thread.
 *
 * Except for dsl_deadlist_insert(), it can only be modified with the
 * dp_config_rwlock held with RW_WRITER.
 *
 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
 * be called concurrently, from open context, with the dp_config_rwlock held
 * with RW_READER.
 *
 * Therefore, we only need to provide locking between dsl_deadlist_insert() and
 * the accessors, protecting:
 *     dl_phys->dl_used,comp,uncomp
 *     and protecting the dl_tree from being loaded.
 * The locking is provided by dl_lock.  Note that the bpobj_t
 * provides its own locking, and dl_oldfmt is immutable.
 */
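/*
 * A minimal sketch of the pattern this implies for an open-context
 * accessor on a new-format deadlist (hypothetical snippet; see
 * dsl_deadlist_space() below for the real accessor):
 *
 *	mutex_enter(&dl->dl_lock);	// serializes against insert
 *	uint64_t used = dl->dl_phys->dl_used;
 *	mutex_exit(&dl->dl_lock);
 *
 * Only dl_phys's space totals and the loading of dl_tree need this
 * protection; the bpobj_t and the immutable dl_oldfmt do not.
 */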
/*
 * Livelists:
 *
 * Livelists use the same 'deadlist_t' struct as deadlists and are also used
 * to track blkptrs over the lifetime of a dataset.  Livelists, however,
 * belong to clones and track the blkptrs that are clone-specific (i.e. were
 * born after the clone's creation).  The exception is embedded block
 * pointers, which are not included in livelists because they do not need to
 * be freed.
 *
 * When it comes time to delete the clone, the livelist provides a quick
 * reference as to what needs to be freed.  For this reason, livelists also
 * track when clone-specific blkptrs are freed before deletion, to prevent
 * double frees.  Each blkptr in a livelist is marked as a FREE or an ALLOC,
 * and the deletion algorithm iterates backwards over the livelist, matching
 * FREE/ALLOC pairs and then freeing those ALLOCs which remain.  Livelists
 * are also updated in the case when blkptrs are remapped: the old version
 * of the blkptr is cancelled out with a FREE and the new version is tracked
 * in the livelist.
 *
 * To bound the amount of memory required for deletion, livelists over a
 * certain size are spread over multiple entries.  Entries are grouped by
 * birth txg, so we can be sure the ALLOC/FREE pair for a given blkptr will
 * be in the same entry.  This allows us to delete livelists incrementally
 * over multiple syncs, one entry at a time.
 *
 * During the lifetime of the clone, livelists can get extremely large.
 * Their size is managed by periodic condensing (preemptively cancelling out
 * FREE/ALLOC pairs).  Livelists are disabled when a clone is promoted or when
 * the shared space between the clone and its origin is so small that it
 * doesn't make sense to use livelists anymore.
 */
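/*
 * A worked example of the FREE/ALLOC matching described above, for two
 * hypothetical clone-specific blkptrs A and B:
 *
 *	livelist records, oldest first:  ALLOC(A)  ALLOC(B)  FREE(B)
 *
 * Iterating backwards, FREE(B) is seen first and remembered; the
 * following ALLOC(B) cancels it out.  ALLOC(A) has no matching FREE, so
 * A is the only block still allocated and the only one the deletion
 * algorithm must free.
 */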
/*
 * The threshold sublist size at which we create a new sub-livelist for the
 * next txg.  However, since blkptrs of the same transaction group must be in
 * the same sub-list, the actual sublist size may exceed this.  When picking
 * the size we had to balance the fact that larger sublists mean fewer
 * sublists (decreasing the cost of insertion) against the consideration that
 * sublists will be loaded into memory and shouldn't take up an inordinate
 * amount of space.  We settled on ~500000 entries, corresponding to roughly
 * 128M.
 */
uint64_t zfs_livelist_max_entries = 500000;
/*
 * We can approximate how much of a performance gain a livelist will give us
 * based on the percentage of blocks shared between the clone and its origin.
 * 0 percent shared means that the clone has completely diverged and that the
 * old method is maximally effective: every read from the block tree will
 * result in lots of frees.  Livelists give us gains when they track blocks
 * scattered across the tree, when one read in the old method might only
 * result in a few frees.  Once the clone has been overwritten enough,
 * writes are no longer sparse and we'll no longer get much of a benefit from
 * tracking them with a livelist.  We chose a lower limit of 75 percent shared
 * (25 percent overwritten).  This means that 1/4 of all block pointers will
 * be freed (e.g. each read frees 256, out of a max of 1024), so we expect
 * livelists to make deletion 4x faster.  Once the amount of shared space
 * drops below this threshold, the clone will revert to the old deletion
 * method.
 */
int zfs_livelist_min_percent_shared = 75;
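/*
 * Back-of-the-envelope version of the reasoning above: if a fraction s
 * of the blocks is still shared, only (1 - s) of the block pointers
 * visited by the old method result in frees, so the expected speedup
 * from using a livelist is roughly:
 *
 *	speedup ~= 1 / (1 - s)
 *
 * which at the chosen limit of s = 0.75 gives the 4x figure quoted above.
 */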
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_entry_t *dle1 = arg1;
	const dsl_deadlist_entry_t *dle2 = arg2;

	return (TREE_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
}
static int
dsl_deadlist_cache_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_cache_entry_t *dlce1 = arg1;
	const dsl_deadlist_cache_entry_t *dlce2 = arg2;

	return (TREE_CMP(dlce1->dlce_mintxg, dlce2->dlce_mintxg));
}
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache) {
		/*
		 * After loading the tree, the caller may modify the tree,
		 * e.g. to add or remove nodes, or to make a node no longer
		 * refer to the empty_bpobj.  These changes would make the
		 * dl_cache incorrect.  Therefore we discard the cache here,
		 * so that it can't become incorrect.
		 */
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL)
			kmem_free(dlce, sizeof (*dlce));
		avl_destroy(&dl->dl_cache);
		dl->dl_havecache = B_FALSE;
	}
	if (dl->dl_havetree)
		return;

	za = zap_attribute_alloc();
	avl_create(&dl->dl_tree, dsl_deadlist_compare,
	    sizeof (dsl_deadlist_entry_t),
	    offsetof(dsl_deadlist_entry_t, dle_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
		dle->dle_mintxg = zfs_strtonum(za->za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dle->dle_bpobj.bpo_object = za->za_first_integer;
		dmu_prefetch_dnode(dl->dl_os, dle->dle_bpobj.bpo_object,
		    ZIO_PRIORITY_SYNC_READ);

		avl_add(&dl->dl_tree, dle);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	for (dsl_deadlist_entry_t *dle = avl_first(&dl->dl_tree);
	    dle != NULL; dle = AVL_NEXT(&dl->dl_tree, dle)) {
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os,
		    dle->dle_bpobj.bpo_object));
	}
	dl->dl_havetree = B_TRUE;
}
/*
 * Load only the non-empty bpobj's into the dl_cache.  The cache is an analog
 * of the dl_tree, but contains only non-empty_bpobj nodes from the ZAP.  It
 * is used only for gathering space statistics.  The dl_cache has two
 * advantages over the dl_tree:
 *
 * 1. Loading the dl_cache is ~5x faster than loading the dl_tree (if it's
 * mostly empty_bpobj's), due to less CPU overhead to open the empty_bpobj
 * many times and to inquire about its (zero) space stats many times.
 *
 * 2. The dl_cache uses less memory than the dl_tree.  We only need to load
 * the dl_tree of snapshots when deleting a snapshot, after which we free the
 * dl_tree with dsl_deadlist_discard_tree().
 */
static void
dsl_deadlist_load_cache(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache)
		return;

	uint64_t empty_bpobj = dmu_objset_pool(dl->dl_os)->dp_empty_bpobj;

	avl_create(&dl->dl_cache, dsl_deadlist_cache_compare,
	    sizeof (dsl_deadlist_cache_entry_t),
	    offsetof(dsl_deadlist_cache_entry_t, dlce_node));
	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		if (za->za_first_integer == empty_bpobj)
			continue;
		dsl_deadlist_cache_entry_t *dlce =
		    kmem_zalloc(sizeof (*dlce), KM_SLEEP);
		dlce->dlce_mintxg = zfs_strtonum(za->za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dlce->dlce_bpobj = za->za_first_integer;
		dmu_prefetch_dnode(dl->dl_os, dlce->dlce_bpobj,
		    ZIO_PRIORITY_SYNC_READ);
		avl_add(&dl->dl_cache, dlce);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	for (dsl_deadlist_cache_entry_t *dlce = avl_first(&dl->dl_cache);
	    dlce != NULL; dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, dlce->dlce_bpobj));
		VERIFY0(bpobj_space(&bpo,
		    &dlce->dlce_bytes, &dlce->dlce_comp, &dlce->dlce_uncomp));
		bpobj_close(&bpo);
	}
	dl->dl_havecache = B_TRUE;
}
/*
 * Discard the tree to save memory.
 */
void
dsl_deadlist_discard_tree(dsl_deadlist_t *dl)
{
	mutex_enter(&dl->dl_lock);

	if (!dl->dl_havetree) {
		mutex_exit(&dl->dl_lock);
		return;
	}
	dsl_deadlist_entry_t *dle;
	void *cookie = NULL;
	while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) {
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
	}
	avl_destroy(&dl->dl_tree);

	dl->dl_havetree = B_FALSE;
	mutex_exit(&dl->dl_lock);
}
int
dsl_deadlist_iterate(dsl_deadlist_t *dl, deadlist_iter_t func, void *args)
{
	dsl_deadlist_entry_t *dle;

	ASSERT(dsl_deadlist_is_open(dl));

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	mutex_exit(&dl->dl_lock);
	for (dle = avl_first(&dl->dl_tree); dle != NULL;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		if (func(args, dle) != 0)
			return (-1);
	}
	return (0);
}
void
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;

	ASSERT(!dsl_deadlist_is_open(dl));

	mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
	dl->dl_os = os;
	dl->dl_object = object;
	VERIFY0(dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
	dmu_object_info_from_db(dl->dl_dbuf, &doi);
	if (doi.doi_type == DMU_OT_BPOBJ) {
		dmu_buf_rele(dl->dl_dbuf, dl);
		dl->dl_dbuf = NULL;
		dl->dl_oldfmt = B_TRUE;
		VERIFY0(bpobj_open(&dl->dl_bpobj, os, object));
		return;
	}

	dl->dl_oldfmt = B_FALSE;
	dl->dl_phys = dl->dl_dbuf->db_data;
	dl->dl_havetree = B_FALSE;
	dl->dl_havecache = B_FALSE;
}
boolean_t
dsl_deadlist_is_open(dsl_deadlist_t *dl)
{
	return (dl->dl_os != NULL);
}
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
	ASSERT(dsl_deadlist_is_open(dl));
	mutex_destroy(&dl->dl_lock);

	if (dl->dl_oldfmt) {
		dl->dl_oldfmt = B_FALSE;
		bpobj_close(&dl->dl_bpobj);
		dl->dl_os = NULL;
		dl->dl_object = 0;
		return;
	}

	if (dl->dl_havetree) {
		dsl_deadlist_entry_t *dle;
		void *cookie = NULL;
		while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
		    != NULL) {
			bpobj_close(&dle->dle_bpobj);
			kmem_free(dle, sizeof (*dle));
		}
		avl_destroy(&dl->dl_tree);
	}
	if (dl->dl_havecache) {
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL) {
			kmem_free(dlce, sizeof (*dlce));
		}
		avl_destroy(&dl->dl_cache);
	}
	dmu_buf_rele(dl->dl_dbuf, dl);
	dl->dl_dbuf = NULL;
	dl->dl_phys = NULL;
	dl->dl_os = NULL;
	dl->dl_object = 0;
}
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
	return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
	    sizeof (dsl_deadlist_phys_t), tx));
}
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
	dmu_object_info_t doi;
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	VERIFY0(dmu_object_info(os, dlobj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_free(os, dlobj, tx);
		return;
	}

	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, os, dlobj);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t obj = za->za_first_integer;
		if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
			bpobj_decr_empty(os, tx);
		else
			bpobj_free(os, obj, tx);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	VERIFY0(dmu_object_free(os, dlobj, tx));
}
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object ==
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
	bpobj_enqueue(&dle->dle_bpobj, bp, bp_freed, tx);
}
static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
	} else {
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
}
/*
 * Prefetch metadata required for dle_enqueue_subobj().
 */
static void
dle_prefetch_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj)
{
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj)
		bpobj_prefetch_subobj(&dle->dle_bpobj, obj);
}
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		bpobj_enqueue(&dl->dl_bpobj, bp, bp_freed, tx);
		return;
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);

	int sign = bp_freed ? -1 : +1;
	dl->dl_phys->dl_used +=
	    sign * bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
	dl->dl_phys->dl_comp += sign * BP_GET_PSIZE(bp);
	dl->dl_phys->dl_uncomp += sign * BP_GET_UCSIZE(bp);

	dle_tofind.dle_mintxg = BP_GET_LOGICAL_BIRTH(bp);
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	else
		dle = AVL_PREV(&dl->dl_tree, dle);

	if (dle == NULL) {
		zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
		    bp, (longlong_t)BP_GET_LOGICAL_BIRTH(bp));
		dle = avl_first(&dl->dl_tree);
	}

	ASSERT3P(dle, !=, NULL);
	dle_enqueue(dl, dle, bp, bp_freed, tx);
	mutex_exit(&dl->dl_lock);
}
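/*
 * Example of the entry selection above, with hypothetical keys
 * {0, 100, 200}: a bp with logical birth 150 has no exact match, so
 * avl_nearest(..., AVL_BEFORE) selects the entry keyed 100.  A bp born
 * exactly at key 100 is an exact match and is stepped back with
 * AVL_PREV() to the entry keyed 0, because each entry covers the
 * half-open txg range (dle_mintxg, next key].
 */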
int
dsl_deadlist_insert_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_FALSE, tx);
	return (0);
}

int
dsl_deadlist_insert_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_TRUE, tx);
	return (0);
}
/*
 * Insert a new key in the deadlist.  The new key must be greater than all
 * current entries; mintxg is not inclusive.
 */
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t obj;
	dsl_deadlist_entry_t *dle;

	if (dl->dl_oldfmt)
		return;

	dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
	dle->dle_mintxg = mintxg;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
	avl_add(&dl->dl_tree, dle);

	VERIFY0(zap_add_int_key(dl->dl_os, dl->dl_object,
	    mintxg, obj, tx));
	mutex_exit(&dl->dl_lock);
}
/*
 * Remove this key, merging its entries into the previous key.
 */
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *dle_prev;

	if (dl->dl_oldfmt)
		return;
	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	ASSERT3P(dle, !=, NULL);
	dle_prev = AVL_PREV(&dl->dl_tree, dle);
	ASSERT3P(dle_prev, !=, NULL);

	dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);

	avl_remove(&dl->dl_tree, dle);
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));

	VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
	mutex_exit(&dl->dl_lock);
}
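/*
 * Example with hypothetical keys {0, 100, 200}: removing key 100 folds
 * its bpobj into key 0's bpobj via dle_enqueue_subobj(), leaving keys
 * {0, 200}.  dl_phys's space totals are untouched, since the blocks only
 * moved between entries of the same deadlist.
 */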
/*
 * Remove a deadlist entry and all of its contents by removing the entry from
 * the deadlist's avl tree, freeing the entry's bpobj and adjusting the
 * deadlist's space accounting accordingly.
 */
void
dsl_deadlist_remove_entry(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t used, comp, uncomp;
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	objset_t *os = dl->dl_os;

	if (dl->dl_oldfmt)
		return;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	VERIFY3P(dle, !=, NULL);

	avl_remove(&dl->dl_tree, dle);
	VERIFY0(zap_remove_int(os, dl->dl_object, mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj) {
		bpobj_decr_empty(os, tx);
	} else {
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	}
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));
	mutex_exit(&dl->dl_lock);
}
/*
 * Clear out the contents of a deadlist_entry by freeing its bpobj,
 * replacing it with an empty bpobj and adjusting the deadlist's
 * space accounting.
 */
void
dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl,
    dmu_tx_t *tx)
{
	uint64_t new_obj, used, comp, uncomp;
	objset_t *os = dl->dl_os;

	mutex_enter(&dl->dl_lock);
	VERIFY0(zap_remove_int(os, dl->dl_object, dle->dle_mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj)
		bpobj_decr_empty(os, tx);
	else
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	bpobj_close(&dle->dle_bpobj);
	new_obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, os, new_obj));
	VERIFY0(zap_add_int_key(os, dl->dl_object, dle->dle_mintxg,
	    new_obj, tx));
	ASSERT(bpobj_is_empty(&dle->dle_bpobj));
	mutex_exit(&dl->dl_lock);
}
/*
 * Return the first entry in the deadlist's avl tree.
 */
dsl_deadlist_entry_t *
dsl_deadlist_first(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_first(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}
/*
 * Return the last entry in the deadlist's avl tree.
 */
dsl_deadlist_entry_t *
dsl_deadlist_last(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_last(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}
/*
 * Walk ds's snapshots to regenerate the ZAP & AVL.
 */
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_t dl = { 0 };
	dsl_pool_t *dp = dmu_objset_pool(os);

	dsl_deadlist_open(&dl, os, dlobj);
	if (dl.dl_oldfmt) {
		dsl_deadlist_close(&dl);
		return;
	}

	while (mrs_obj != 0) {
		dsl_dataset_t *ds;
		VERIFY0(dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
		dsl_deadlist_add_key(&dl,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
		mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_deadlist_close(&dl);
}
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t *dle;
	uint64_t newobj;

	newobj = dsl_deadlist_alloc(dl->dl_os, tx);

	if (dl->dl_oldfmt) {
		dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
		return (newobj);
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t obj;

		if (dle->dle_mintxg >= maxtxg)
			break;

		obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY0(zap_add_int_key(dl->dl_os, newobj,
		    dle->dle_mintxg, obj, tx));
	}
	mutex_exit(&dl->dl_lock);
	return (newobj);
}
void
dsl_deadlist_space(dsl_deadlist_t *dl,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	ASSERT(dsl_deadlist_is_open(dl));
	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space(&dl->dl_bpobj,
		    usedp, compp, uncompp));
		return;
	}

	mutex_enter(&dl->dl_lock);
	*usedp = dl->dl_phys->dl_used;
	*compp = dl->dl_phys->dl_comp;
	*uncompp = dl->dl_phys->dl_uncomp;
	mutex_exit(&dl->dl_lock);
}
/*
 * Return the space used in the range (mintxg, maxtxg].
 * Includes maxtxg, does not include mintxg.
 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
 * UINT64_MAX).
 */
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	dsl_deadlist_cache_entry_t *dlce;
	dsl_deadlist_cache_entry_t dlce_tofind;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space_range(&dl->dl_bpobj,
		    mintxg, maxtxg, usedp, compp, uncompp));
		return;
	}

	*usedp = *compp = *uncompp = 0;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_cache(dl);
	dlce_tofind.dlce_mintxg = mintxg;
	dlce = avl_find(&dl->dl_cache, &dlce_tofind, &where);

	/*
	 * If this mintxg doesn't exist, it may be an empty_bpobj which
	 * is omitted from the sparse tree.  Start at the next non-empty
	 * entry.
	 */
	if (dlce == NULL)
		dlce = avl_nearest(&dl->dl_cache, where, AVL_AFTER);

	for (; dlce && dlce->dlce_mintxg < maxtxg;
	    dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		*usedp += dlce->dlce_bytes;
		*compp += dlce->dlce_comp;
		*uncompp += dlce->dlce_uncomp;
	}

	mutex_exit(&dl->dl_lock);
}
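/*
 * Usage sketch (hypothetical caller): to ask how much space died in the
 * txg window between a snapshot and its predecessor, both of whose birth
 * txgs are keys in the dataset's deadlist:
 *
 *	uint64_t used, comp, uncomp;
 *	dsl_deadlist_space_range(&ds->ds_deadlist, prev_snap_txg,
 *	    snap_txg, &used, &comp, &uncomp);
 */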
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;
	uint64_t used, comp, uncomp;
	bpobj_t bpo;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
	VERIFY0(bpobj_space(&bpo, &used, &comp, &uncomp));
	bpobj_close(&bpo);

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used += used;
	dl->dl_phys->dl_comp += comp;
	dl->dl_phys->dl_uncomp += uncomp;

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_enqueue_subobj(dl, dle, obj, tx);
}
/*
 * Prefetch metadata required for dsl_deadlist_insert_bpobj().
 */
static void
dsl_deadlist_prefetch_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_prefetch_subobj(dl, dle, obj);
}
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, bp_freed, tx);
	return (0);
}
/*
 * Merge the deadlist pointed to by 'obj' into dl.  obj will be left as
 * an empty deadlist.
 */
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
	zap_cursor_t zc, pzc;
	zap_attribute_t *za, *pza;
	dmu_buf_t *bonus;
	dsl_deadlist_phys_t *dlp;
	dmu_object_info_t doi;
	int error, perror, i;

	VERIFY0(dmu_object_info(dl->dl_os, obj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
		VERIFY0(bpobj_iterate(&bpo, dsl_deadlist_insert_cb, dl, tx));
		bpobj_close(&bpo);
		return;
	}

	za = zap_attribute_alloc();
	pza = zap_attribute_alloc();

	mutex_enter(&dl->dl_lock);
	/*
	 * Prefetch up to 128 deadlists first and then more as we progress.
	 * The limit is a balance between ARC use and diminishing returns.
	 */
	for (zap_cursor_init(&pzc, dl->dl_os, obj), i = 0;
	    (perror = zap_cursor_retrieve(&pzc, pza)) == 0 && i < 128;
	    zap_cursor_advance(&pzc), i++) {
		dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
		    zfs_strtonum(pza->za_name, NULL));
	}
	for (zap_cursor_init(&zc, dl->dl_os, obj);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_insert_bpobj(dl, za->za_first_integer,
		    zfs_strtonum(za->za_name, NULL), tx);
		VERIFY0(zap_remove(dl->dl_os, obj, za->za_name, tx));
		if (perror == 0) {
			dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
			    zfs_strtonum(pza->za_name, NULL));
			zap_cursor_advance(&pzc);
			perror = zap_cursor_retrieve(&pzc, pza);
		}
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_cursor_fini(&pzc);

	VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
	dlp = bonus->db_data;
	dmu_buf_will_dirty(bonus, tx);
	memset(dlp, 0, sizeof (*dlp));
	dmu_buf_rele(bonus, FTAG);
	mutex_exit(&dl->dl_lock);

	zap_attribute_free(za);
	zap_attribute_free(pza);
}
/*
 * Remove entries on dl that are born > mintxg, and put them on the bpobj.
 */
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *pdle;
	avl_index_t where;
	int i;

	ASSERT(!dl->dl_oldfmt);

	mutex_enter(&dl->dl_lock);
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
	/*
	 * Prefetch up to 128 deadlists first and then more as we progress.
	 * The limit is a balance between ARC use and diminishing returns.
	 */
	for (pdle = dle, i = 0; pdle && i < 128; i++) {
		bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
		pdle = AVL_NEXT(&dl->dl_tree, pdle);
	}
	while (dle) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_entry_t *dle_next;

		bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
		if (pdle) {
			bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
			pdle = AVL_NEXT(&dl->dl_tree, pdle);
		}

		VERIFY0(bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));
		ASSERT3U(dl->dl_phys->dl_used, >=, used);
		ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
		ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
		dl->dl_phys->dl_used -= used;
		dl->dl_phys->dl_comp -= comp;
		dl->dl_phys->dl_uncomp -= uncomp;

		VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, tx));

		dle_next = AVL_NEXT(&dl->dl_tree, dle);
		avl_remove(&dl->dl_tree, dle);
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
		dle = dle_next;
	}
	mutex_exit(&dl->dl_lock);
}
typedef struct livelist_entry {
	blkptr_t le_bp;
	uint32_t le_refcnt;
	avl_node_t le_node;
} livelist_entry_t;

static int
livelist_compare(const void *larg, const void *rarg)
{
	const blkptr_t *l = &((livelist_entry_t *)larg)->le_bp;
	const blkptr_t *r = &((livelist_entry_t *)rarg)->le_bp;

	/* Sort them according to dva[0] */
	uint64_t l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
	uint64_t r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);

	if (l_dva0_vdev != r_dva0_vdev)
		return (TREE_CMP(l_dva0_vdev, r_dva0_vdev));

	/* if vdevs are equal, sort by offsets. */
	uint64_t l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
	uint64_t r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
	return (TREE_CMP(l_dva0_offset, r_dva0_offset));
}
struct livelist_iter_arg {
	avl_tree_t *avl;
	bplist_t *to_free;
	zthr_t *t;
};

/*
 * Expects an AVL tree which is incrementally filled with FREE blkptrs
 * and used to match up ALLOC/FREE pairs.  ALLOC'd blkptrs without a
 * corresponding FREE are stored in the supplied bplist.
 *
 * Note that multiple FREE and ALLOC entries for the same blkptr may be
 * encountered when dedup or block cloning is involved.  For this reason we
 * keep a refcount for all the FREE entries of each blkptr and ensure that
 * each of those FREE entries has a corresponding ALLOC preceding it.
 */
static int
dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	struct livelist_iter_arg *lia = arg;
	avl_tree_t *avl = lia->avl;
	bplist_t *to_free = lia->to_free;
	zthr_t *t = lia->t;
	ASSERT(tx == NULL);

	if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
		return (SET_ERROR(EINTR));

	livelist_entry_t node;
	node.le_bp = *bp;
	livelist_entry_t *found = avl_find(avl, &node, NULL);
	if (found) {
		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(&found->le_bp));
		ASSERT3U(BP_GET_CHECKSUM(bp), ==,
		    BP_GET_CHECKSUM(&found->le_bp));
		ASSERT3U(BP_GET_BIRTH(bp), ==, BP_GET_BIRTH(&found->le_bp));
	}
	if (bp_freed) {
		if (found == NULL) {
			/* first free entry for this blkptr */
			livelist_entry_t *e =
			    kmem_alloc(sizeof (livelist_entry_t), KM_SLEEP);
			e->le_bp = *bp;
			e->le_refcnt = 1;
			avl_add(avl, e);
		} else {
			/*
			 * Deduped or cloned block free.  We could assert D bit
			 * for dedup, but there is no such one for cloning.
			 */
			ASSERT3U(found->le_refcnt + 1, >, found->le_refcnt);
			found->le_refcnt++;
		}
	} else {
		if (found == NULL) {
			/* block is currently marked as allocated */
			bplist_append(to_free, bp);
		} else {
			/* alloc matches a free entry */
			ASSERT3U(found->le_refcnt, !=, 0);
			found->le_refcnt--;
			if (found->le_refcnt == 0) {
				/* all tracked free pairs have been matched */
				avl_remove(avl, found);
				kmem_free(found, sizeof (livelist_entry_t));
			}
		}
	}
	return (0);
}
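/*
 * Worked example of the refcount logic above for a hypothetical blkptr X
 * that was written, cloned twice, and then fully released.  Iterating
 * the livelist backwards (newest record first):
 *
 *	FREE(X)  -> no node yet: insert X with le_refcnt = 1
 *	FREE(X)  -> node found:  le_refcnt = 2
 *	FREE(X)  -> node found:  le_refcnt = 3
 *	ALLOC(X) -> le_refcnt = 2
 *	ALLOC(X) -> le_refcnt = 1
 *	ALLOC(X) -> le_refcnt = 0: node removed, nothing left to free
 *
 * An ALLOC that finds no node is still allocated and lands on the
 * to_free bplist instead.
 */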
/*
 * Accepts a bpobj and a bplist.  Will insert into the bplist the blkptrs
 * which have an ALLOC entry but no matching FREE.
 */
int
dsl_process_sub_livelist(bpobj_t *bpobj, bplist_t *to_free, zthr_t *t,
    uint64_t *size)
{
	avl_tree_t avl;
	avl_create(&avl, livelist_compare, sizeof (livelist_entry_t),
	    offsetof(livelist_entry_t, le_node));

	/* process the sublist */
	struct livelist_iter_arg arg = {
	    .avl = &avl,
	    .to_free = to_free,
	    .t = t
	};
	int err = bpobj_iterate_nofree(bpobj, dsl_livelist_iterate, &arg, size);
	VERIFY(err != 0 || avl_numnodes(&avl) == 0);

	void *cookie = NULL;
	livelist_entry_t *le = NULL;
	while ((le = avl_destroy_nodes(&avl, &cookie)) != NULL) {
		kmem_free(le, sizeof (livelist_entry_t));
	}
	avl_destroy(&avl);
	return (err);
}
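/*
 * Usage sketch (hypothetical caller; free_blkptr_cb, arg and tx are
 * assumptions, error handling elided).  Collect the still-allocated
 * blkptrs of one sub-livelist into a bplist and process them:
 *
 *	bplist_t to_free;
 *	uint64_t size;
 *	bplist_create(&to_free);
 *	if (dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
 *	    livelist_zthr, &size) == 0)
 *		bplist_iterate(&to_free, free_blkptr_cb, arg, tx);
 *	bplist_destroy(&to_free);
 *
 * In practice the livelist delete/condense zthrs in spa.c drive this,
 * passing their zthr so the scan can be interrupted.
 */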
ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, max_entries, U64, ZMOD_RW,
	"Size to start the next sub-livelist in a livelist");

ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, min_percent_shared, INT, ZMOD_RW,
	"Threshold at which livelist is disabled");