ZIL: Call brt_pending_add() replaying TX_CLONE_RANGE
[zfs.git] / module / zfs / dbuf.c
blob5a7fe42b602a286ec9318f7aae725110f1a870dd
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 * Copyright (c) 2019, Klara Inc.
28 * Copyright (c) 2019, Allan Jude
29 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
32 #include <sys/zfs_context.h>
33 #include <sys/arc.h>
34 #include <sys/dmu.h>
35 #include <sys/dmu_send.h>
36 #include <sys/dmu_impl.h>
37 #include <sys/dbuf.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/dsl_dataset.h>
40 #include <sys/dsl_dir.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/spa.h>
43 #include <sys/zio.h>
44 #include <sys/dmu_zfetch.h>
45 #include <sys/sa.h>
46 #include <sys/sa_impl.h>
47 #include <sys/zfeature.h>
48 #include <sys/blkptr.h>
49 #include <sys/range_tree.h>
50 #include <sys/trace_zfs.h>
51 #include <sys/callb.h>
52 #include <sys/abd.h>
53 #include <sys/brt.h>
54 #include <sys/vdev.h>
55 #include <cityhash.h>
56 #include <sys/spa_impl.h>
57 #include <sys/wmsum.h>
58 #include <sys/vdev_impl.h>
60 static kstat_t *dbuf_ksp;
62 typedef struct dbuf_stats {
64 * Various statistics about the size of the dbuf cache.
66 kstat_named_t cache_count;
67 kstat_named_t cache_size_bytes;
68 kstat_named_t cache_size_bytes_max;
70 * Statistics regarding the bounds on the dbuf cache size.
72 kstat_named_t cache_target_bytes;
73 kstat_named_t cache_lowater_bytes;
74 kstat_named_t cache_hiwater_bytes;
76 * Total number of dbuf cache evictions that have occurred.
78 kstat_named_t cache_total_evicts;
80 * The distribution of dbuf levels in the dbuf cache and
81 * the total size of all dbufs at each level.
83 kstat_named_t cache_levels[DN_MAX_LEVELS];
84 kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
86 * Statistics about the dbuf hash table.
88 kstat_named_t hash_hits;
89 kstat_named_t hash_misses;
90 kstat_named_t hash_collisions;
91 kstat_named_t hash_elements;
92 kstat_named_t hash_elements_max;
94 * Number of sublists containing more than one dbuf in the dbuf
95 * hash table. Keep track of the longest hash chain.
97 kstat_named_t hash_chains;
98 kstat_named_t hash_chain_max;
100 * Number of times a dbuf_create() discovers that a dbuf was
101 * already created and in the dbuf hash table.
103 kstat_named_t hash_insert_race;
105 * Number of entries in the hash table dbuf and mutex arrays.
107 kstat_named_t hash_table_count;
108 kstat_named_t hash_mutex_count;
110 * Statistics about the size of the metadata dbuf cache.
112 kstat_named_t metadata_cache_count;
113 kstat_named_t metadata_cache_size_bytes;
114 kstat_named_t metadata_cache_size_bytes_max;
116 * For diagnostic purposes, this is incremented whenever we can't add
117 * something to the metadata cache because it's full, and instead put
118 * the data in the regular dbuf cache.
120 kstat_named_t metadata_cache_overflow;
121 } dbuf_stats_t;
123 dbuf_stats_t dbuf_stats = {
124 { "cache_count", KSTAT_DATA_UINT64 },
125 { "cache_size_bytes", KSTAT_DATA_UINT64 },
126 { "cache_size_bytes_max", KSTAT_DATA_UINT64 },
127 { "cache_target_bytes", KSTAT_DATA_UINT64 },
128 { "cache_lowater_bytes", KSTAT_DATA_UINT64 },
129 { "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
130 { "cache_total_evicts", KSTAT_DATA_UINT64 },
131 { { "cache_levels_N", KSTAT_DATA_UINT64 } },
132 { { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
133 { "hash_hits", KSTAT_DATA_UINT64 },
134 { "hash_misses", KSTAT_DATA_UINT64 },
135 { "hash_collisions", KSTAT_DATA_UINT64 },
136 { "hash_elements", KSTAT_DATA_UINT64 },
137 { "hash_elements_max", KSTAT_DATA_UINT64 },
138 { "hash_chains", KSTAT_DATA_UINT64 },
139 { "hash_chain_max", KSTAT_DATA_UINT64 },
140 { "hash_insert_race", KSTAT_DATA_UINT64 },
141 { "hash_table_count", KSTAT_DATA_UINT64 },
142 { "hash_mutex_count", KSTAT_DATA_UINT64 },
143 { "metadata_cache_count", KSTAT_DATA_UINT64 },
144 { "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
145 { "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
146 { "metadata_cache_overflow", KSTAT_DATA_UINT64 }
149 struct {
150 wmsum_t cache_count;
151 wmsum_t cache_total_evicts;
152 wmsum_t cache_levels[DN_MAX_LEVELS];
153 wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
154 wmsum_t hash_hits;
155 wmsum_t hash_misses;
156 wmsum_t hash_collisions;
157 wmsum_t hash_chains;
158 wmsum_t hash_insert_race;
159 wmsum_t metadata_cache_count;
160 wmsum_t metadata_cache_overflow;
161 } dbuf_sums;
163 #define DBUF_STAT_INCR(stat, val) \
164 wmsum_add(&dbuf_sums.stat, val);
165 #define DBUF_STAT_DECR(stat, val) \
166 DBUF_STAT_INCR(stat, -(val));
167 #define DBUF_STAT_BUMP(stat) \
168 DBUF_STAT_INCR(stat, 1);
169 #define DBUF_STAT_BUMPDOWN(stat) \
170 DBUF_STAT_INCR(stat, -1);
171 #define DBUF_STAT_MAX(stat, v) { \
172 uint64_t _m; \
173 while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
174 (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
175 continue; \
178 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
179 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
180 static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
183 * Global data structures and functions for the dbuf cache.
185 static kmem_cache_t *dbuf_kmem_cache;
186 static taskq_t *dbu_evict_taskq;
188 static kthread_t *dbuf_cache_evict_thread;
189 static kmutex_t dbuf_evict_lock;
190 static kcondvar_t dbuf_evict_cv;
191 static boolean_t dbuf_evict_thread_exit;
194 * There are two dbuf caches; each dbuf can only be in one of them at a time.
196 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
197 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
198 * that represent the metadata that describes filesystems/snapshots/
199 * bookmarks/properties/etc. We only evict from this cache when we export a
200 * pool, to short-circuit as much I/O as possible for all administrative
201 * commands that need the metadata. There is no eviction policy for this
202 * cache, because we try to only include types in it which would occupy a
203 * very small amount of space per object but create a large impact on the
204 * performance of these commands. Instead, after it reaches a maximum size
205 * (which should only happen on very small memory systems with a very large
206 * number of filesystem objects), we stop taking new dbufs into the
207 * metadata cache, instead putting them in the normal dbuf cache.
209 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
210 * are not currently held but have been recently released. These dbufs
211 * are not eligible for arc eviction until they are aged out of the cache.
212 * Dbufs that are aged out of the cache will be immediately destroyed and
213 * become eligible for arc eviction.
215 * Dbufs are added to these caches once the last hold is released. If a dbuf is
216 * later accessed and still exists in the dbuf cache, then it will be removed
217 * from the cache and later re-added to the head of the cache.
219 * If a given dbuf meets the requirements for the metadata cache, it will go
220 * there, otherwise it will be considered for the generic LRU dbuf cache. The
221 * caches and the refcounts tracking their sizes are stored in an array indexed
222 * by those caches' matching enum values (from dbuf_cached_state_t).
224 typedef struct dbuf_cache {
225 multilist_t cache;
226 zfs_refcount_t size ____cacheline_aligned;
227 } dbuf_cache_t;
228 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
230 /* Size limits for the caches */
231 static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
232 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
234 /* Set the default sizes of the caches to log2 fraction of arc size */
235 static uint_t dbuf_cache_shift = 5;
236 static uint_t dbuf_metadata_cache_shift = 6;
238 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
239 static uint_t dbuf_mutex_cache_shift = 0;
241 static unsigned long dbuf_cache_target_bytes(void);
242 static unsigned long dbuf_metadata_cache_target_bytes(void);
245 * The LRU dbuf cache uses a three-stage eviction policy:
246 * - A low water marker designates when the dbuf eviction thread
247 * should stop evicting from the dbuf cache.
248 * - When we reach the maximum size (aka mid water mark), we
249 * signal the eviction thread to run.
250 * - The high water mark indicates when the eviction thread
251 * is unable to keep up with the incoming load and eviction must
252 * happen in the context of the calling thread.
254 * The dbuf cache:
255 * (max size)
256 * low water mid water hi water
257 * +----------------------------------------+----------+----------+
258 * | | | |
259 * | | | |
260 * | | | |
261 * | | | |
262 * +----------------------------------------+----------+----------+
263 * stop signal evict
264 * evicting eviction directly
265 * thread
267 * The high and low water marks indicate the operating range for the eviction
268 * thread. The low water mark is, by default, 90% of the total size of the
269 * cache and the high water mark is at 110% (both of these percentages can be
270 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
271 * respectively). The eviction thread will try to ensure that the cache remains
272 * within this range by waking up every second and checking if the cache is
273 * above the low water mark. The thread can also be woken up by callers adding
274 * elements into the cache if the cache is larger than the mid water (i.e max
275 * cache size). Once the eviction thread is woken up and eviction is required,
276 * it will continue evicting buffers until it's able to reduce the cache size
277 * to the low water mark. If the cache size continues to grow and hits the high
278 * water mark, then callers adding elements to the cache will begin to evict
279 * directly from the cache until the cache is no longer above the high water
280 * mark.
284 * The percentage above and below the maximum cache size.
286 static uint_t dbuf_cache_hiwater_pct = 10;
287 static uint_t dbuf_cache_lowater_pct = 10;
289 static int
290 dbuf_cons(void *vdb, void *unused, int kmflag)
292 (void) unused, (void) kmflag;
293 dmu_buf_impl_t *db = vdb;
294 memset(db, 0, sizeof (dmu_buf_impl_t));
296 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
297 rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
298 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
299 multilist_link_init(&db->db_cache_link);
300 zfs_refcount_create(&db->db_holds);
302 return (0);
305 static void
306 dbuf_dest(void *vdb, void *unused)
308 (void) unused;
309 dmu_buf_impl_t *db = vdb;
310 mutex_destroy(&db->db_mtx);
311 rw_destroy(&db->db_rwlock);
312 cv_destroy(&db->db_changed);
313 ASSERT(!multilist_link_active(&db->db_cache_link));
314 zfs_refcount_destroy(&db->db_holds);
318 * dbuf hash table routines
320 static dbuf_hash_table_t dbuf_hash_table;
323 * We use Cityhash for this. It's fast, and has good hash properties without
324 * requiring any large static buffers.
326 static uint64_t
327 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
329 return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
332 #define DTRACE_SET_STATE(db, why) \
333 DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
334 const char *, why)
336 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
337 ((dbuf)->db.db_object == (obj) && \
338 (dbuf)->db_objset == (os) && \
339 (dbuf)->db_level == (level) && \
340 (dbuf)->db_blkid == (blkid))
342 dmu_buf_impl_t *
343 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
344 uint64_t *hash_out)
346 dbuf_hash_table_t *h = &dbuf_hash_table;
347 uint64_t hv;
348 uint64_t idx;
349 dmu_buf_impl_t *db;
351 hv = dbuf_hash(os, obj, level, blkid);
352 idx = hv & h->hash_table_mask;
354 mutex_enter(DBUF_HASH_MUTEX(h, idx));
355 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
356 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
357 mutex_enter(&db->db_mtx);
358 if (db->db_state != DB_EVICTING) {
359 mutex_exit(DBUF_HASH_MUTEX(h, idx));
360 return (db);
362 mutex_exit(&db->db_mtx);
365 mutex_exit(DBUF_HASH_MUTEX(h, idx));
366 if (hash_out != NULL)
367 *hash_out = hv;
368 return (NULL);
371 static dmu_buf_impl_t *
372 dbuf_find_bonus(objset_t *os, uint64_t object)
374 dnode_t *dn;
375 dmu_buf_impl_t *db = NULL;
377 if (dnode_hold(os, object, FTAG, &dn) == 0) {
378 rw_enter(&dn->dn_struct_rwlock, RW_READER);
379 if (dn->dn_bonus != NULL) {
380 db = dn->dn_bonus;
381 mutex_enter(&db->db_mtx);
383 rw_exit(&dn->dn_struct_rwlock);
384 dnode_rele(dn, FTAG);
386 return (db);
390 * Insert an entry into the hash table. If there is already an element
391 * equal to elem in the hash table, then the already existing element
392 * will be returned and the new element will not be inserted.
393 * Otherwise returns NULL.
395 static dmu_buf_impl_t *
396 dbuf_hash_insert(dmu_buf_impl_t *db)
398 dbuf_hash_table_t *h = &dbuf_hash_table;
399 objset_t *os = db->db_objset;
400 uint64_t obj = db->db.db_object;
401 int level = db->db_level;
402 uint64_t blkid, idx;
403 dmu_buf_impl_t *dbf;
404 uint32_t i;
406 blkid = db->db_blkid;
407 ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
408 idx = db->db_hash & h->hash_table_mask;
410 mutex_enter(DBUF_HASH_MUTEX(h, idx));
411 for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
412 dbf = dbf->db_hash_next, i++) {
413 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
414 mutex_enter(&dbf->db_mtx);
415 if (dbf->db_state != DB_EVICTING) {
416 mutex_exit(DBUF_HASH_MUTEX(h, idx));
417 return (dbf);
419 mutex_exit(&dbf->db_mtx);
423 if (i > 0) {
424 DBUF_STAT_BUMP(hash_collisions);
425 if (i == 1)
426 DBUF_STAT_BUMP(hash_chains);
428 DBUF_STAT_MAX(hash_chain_max, i);
431 mutex_enter(&db->db_mtx);
432 db->db_hash_next = h->hash_table[idx];
433 h->hash_table[idx] = db;
434 mutex_exit(DBUF_HASH_MUTEX(h, idx));
435 uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
436 DBUF_STAT_MAX(hash_elements_max, he);
438 return (NULL);
442 * This returns whether this dbuf should be stored in the metadata cache, which
443 * is based on whether it's from one of the dnode types that store data related
444 * to traversing dataset hierarchies.
446 static boolean_t
447 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
449 DB_DNODE_ENTER(db);
450 dmu_object_type_t type = DB_DNODE(db)->dn_type;
451 DB_DNODE_EXIT(db);
453 /* Check if this dbuf is one of the types we care about */
454 if (DMU_OT_IS_METADATA_CACHED(type)) {
455 /* If we hit this, then we set something up wrong in dmu_ot */
456 ASSERT(DMU_OT_IS_METADATA(type));
459 * Sanity check for small-memory systems: don't allocate too
460 * much memory for this purpose.
462 if (zfs_refcount_count(
463 &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
464 dbuf_metadata_cache_target_bytes()) {
465 DBUF_STAT_BUMP(metadata_cache_overflow);
466 return (B_FALSE);
469 return (B_TRUE);
472 return (B_FALSE);
476 * Remove an entry from the hash table. It must be in the EVICTING state.
478 static void
479 dbuf_hash_remove(dmu_buf_impl_t *db)
481 dbuf_hash_table_t *h = &dbuf_hash_table;
482 uint64_t idx;
483 dmu_buf_impl_t *dbf, **dbp;
485 ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
486 db->db_blkid), ==, db->db_hash);
487 idx = db->db_hash & h->hash_table_mask;
490 * We mustn't hold db_mtx to maintain lock ordering:
491 * DBUF_HASH_MUTEX > db_mtx.
493 ASSERT(zfs_refcount_is_zero(&db->db_holds));
494 ASSERT(db->db_state == DB_EVICTING);
495 ASSERT(!MUTEX_HELD(&db->db_mtx));
497 mutex_enter(DBUF_HASH_MUTEX(h, idx));
498 dbp = &h->hash_table[idx];
499 while ((dbf = *dbp) != db) {
500 dbp = &dbf->db_hash_next;
501 ASSERT(dbf != NULL);
503 *dbp = db->db_hash_next;
504 db->db_hash_next = NULL;
505 if (h->hash_table[idx] &&
506 h->hash_table[idx]->db_hash_next == NULL)
507 DBUF_STAT_BUMPDOWN(hash_chains);
508 mutex_exit(DBUF_HASH_MUTEX(h, idx));
509 atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
512 typedef enum {
513 DBVU_EVICTING,
514 DBVU_NOT_EVICTING
515 } dbvu_verify_type_t;
517 static void
518 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
520 #ifdef ZFS_DEBUG
521 int64_t holds;
523 if (db->db_user == NULL)
524 return;
526 /* Only data blocks support the attachment of user data. */
527 ASSERT(db->db_level == 0);
529 /* Clients must resolve a dbuf before attaching user data. */
530 ASSERT(db->db.db_data != NULL);
531 ASSERT3U(db->db_state, ==, DB_CACHED);
533 holds = zfs_refcount_count(&db->db_holds);
534 if (verify_type == DBVU_EVICTING) {
536 * Immediate eviction occurs when holds == dirtycnt.
537 * For normal eviction buffers, holds is zero on
538 * eviction, except when dbuf_fix_old_data() calls
539 * dbuf_clear_data(). However, the hold count can grow
540 * during eviction even though db_mtx is held (see
541 * dmu_bonus_hold() for an example), so we can only
542 * test the generic invariant that holds >= dirtycnt.
544 ASSERT3U(holds, >=, db->db_dirtycnt);
545 } else {
546 if (db->db_user_immediate_evict == TRUE)
547 ASSERT3U(holds, >=, db->db_dirtycnt);
548 else
549 ASSERT3U(holds, >, 0);
551 #endif
554 static void
555 dbuf_evict_user(dmu_buf_impl_t *db)
557 dmu_buf_user_t *dbu = db->db_user;
559 ASSERT(MUTEX_HELD(&db->db_mtx));
561 if (dbu == NULL)
562 return;
564 dbuf_verify_user(db, DBVU_EVICTING);
565 db->db_user = NULL;
567 #ifdef ZFS_DEBUG
568 if (dbu->dbu_clear_on_evict_dbufp != NULL)
569 *dbu->dbu_clear_on_evict_dbufp = NULL;
570 #endif
573 * There are two eviction callbacks - one that we call synchronously
574 * and one that we invoke via a taskq. The async one is useful for
575 * avoiding lock order reversals and limiting stack depth.
577 * Note that if we have a sync callback but no async callback,
578 * it's likely that the sync callback will free the structure
579 * containing the dbu. In that case we need to take care to not
580 * dereference dbu after calling the sync evict func.
582 boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
584 if (dbu->dbu_evict_func_sync != NULL)
585 dbu->dbu_evict_func_sync(dbu);
587 if (has_async) {
588 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
589 dbu, 0, &dbu->dbu_tqent);
593 boolean_t
594 dbuf_is_metadata(dmu_buf_impl_t *db)
597 * Consider indirect blocks and spill blocks to be meta data.
599 if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
600 return (B_TRUE);
601 } else {
602 boolean_t is_metadata;
604 DB_DNODE_ENTER(db);
605 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
606 DB_DNODE_EXIT(db);
608 return (is_metadata);
613 * We want to exclude buffers that are on a special allocation class from
614 * L2ARC.
616 boolean_t
617 dbuf_is_l2cacheable(dmu_buf_impl_t *db)
619 if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
620 (db->db_objset->os_secondary_cache ==
621 ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
622 if (l2arc_exclude_special == 0)
623 return (B_TRUE);
625 blkptr_t *bp = db->db_blkptr;
626 if (bp == NULL || BP_IS_HOLE(bp))
627 return (B_FALSE);
628 uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
629 vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
630 vdev_t *vd = NULL;
632 if (vdev < rvd->vdev_children)
633 vd = rvd->vdev_child[vdev];
635 if (vd == NULL)
636 return (B_TRUE);
638 if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
639 vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
640 return (B_TRUE);
642 return (B_FALSE);
645 static inline boolean_t
646 dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
648 if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
649 (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
650 (level > 0 ||
651 DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
652 if (l2arc_exclude_special == 0)
653 return (B_TRUE);
655 if (bp == NULL || BP_IS_HOLE(bp))
656 return (B_FALSE);
657 uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
658 vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
659 vdev_t *vd = NULL;
661 if (vdev < rvd->vdev_children)
662 vd = rvd->vdev_child[vdev];
664 if (vd == NULL)
665 return (B_TRUE);
667 if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
668 vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
669 return (B_TRUE);
671 return (B_FALSE);
676 * This function *must* return indices evenly distributed between all
677 * sublists of the multilist. This is needed due to how the dbuf eviction
678 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
679 * distributed between all sublists and uses this assumption when
680 * deciding which sublist to evict from and how much to evict from it.
682 static unsigned int
683 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
685 dmu_buf_impl_t *db = obj;
688 * The assumption here, is the hash value for a given
689 * dmu_buf_impl_t will remain constant throughout it's lifetime
690 * (i.e. it's objset, object, level and blkid fields don't change).
691 * Thus, we don't need to store the dbuf's sublist index
692 * on insertion, as this index can be recalculated on removal.
694 * Also, the low order bits of the hash value are thought to be
695 * distributed evenly. Otherwise, in the case that the multilist
696 * has a power of two number of sublists, each sublists' usage
697 * would not be evenly distributed. In this context full 64bit
698 * division would be a waste of time, so limit it to 32 bits.
700 return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
701 db->db_level, db->db_blkid) %
702 multilist_get_num_sublists(ml));
706 * The target size of the dbuf cache can grow with the ARC target,
707 * unless limited by the tunable dbuf_cache_max_bytes.
709 static inline unsigned long
710 dbuf_cache_target_bytes(void)
712 return (MIN(dbuf_cache_max_bytes,
713 arc_target_bytes() >> dbuf_cache_shift));
717 * The target size of the dbuf metadata cache can grow with the ARC target,
718 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
720 static inline unsigned long
721 dbuf_metadata_cache_target_bytes(void)
723 return (MIN(dbuf_metadata_cache_max_bytes,
724 arc_target_bytes() >> dbuf_metadata_cache_shift));
727 static inline uint64_t
728 dbuf_cache_hiwater_bytes(void)
730 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
731 return (dbuf_cache_target +
732 (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
735 static inline uint64_t
736 dbuf_cache_lowater_bytes(void)
738 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
739 return (dbuf_cache_target -
740 (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
743 static inline boolean_t
744 dbuf_cache_above_lowater(void)
746 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
747 dbuf_cache_lowater_bytes());
751 * Evict the oldest eligible dbuf from the dbuf cache.
753 static void
754 dbuf_evict_one(void)
756 int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
757 multilist_sublist_t *mls = multilist_sublist_lock(
758 &dbuf_caches[DB_DBUF_CACHE].cache, idx);
760 ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
762 dmu_buf_impl_t *db = multilist_sublist_tail(mls);
763 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
764 db = multilist_sublist_prev(mls, db);
767 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
768 multilist_sublist_t *, mls);
770 if (db != NULL) {
771 multilist_sublist_remove(mls, db);
772 multilist_sublist_unlock(mls);
773 (void) zfs_refcount_remove_many(
774 &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
775 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
776 DBUF_STAT_BUMPDOWN(cache_count);
777 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
778 db->db.db_size);
779 ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
780 db->db_caching_status = DB_NO_CACHE;
781 dbuf_destroy(db);
782 DBUF_STAT_BUMP(cache_total_evicts);
783 } else {
784 multilist_sublist_unlock(mls);
789 * The dbuf evict thread is responsible for aging out dbufs from the
790 * cache. Once the cache has reached it's maximum size, dbufs are removed
791 * and destroyed. The eviction thread will continue running until the size
792 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
793 * out of the cache it is destroyed and becomes eligible for arc eviction.
795 static __attribute__((noreturn)) void
796 dbuf_evict_thread(void *unused)
798 (void) unused;
799 callb_cpr_t cpr;
801 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
803 mutex_enter(&dbuf_evict_lock);
804 while (!dbuf_evict_thread_exit) {
805 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
806 CALLB_CPR_SAFE_BEGIN(&cpr);
807 (void) cv_timedwait_idle_hires(&dbuf_evict_cv,
808 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
809 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
811 mutex_exit(&dbuf_evict_lock);
814 * Keep evicting as long as we're above the low water mark
815 * for the cache. We do this without holding the locks to
816 * minimize lock contention.
818 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
819 dbuf_evict_one();
822 mutex_enter(&dbuf_evict_lock);
825 dbuf_evict_thread_exit = B_FALSE;
826 cv_broadcast(&dbuf_evict_cv);
827 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
828 thread_exit();
832 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
833 * If the dbuf cache is at its high water mark, then evict a dbuf from the
834 * dbuf cache using the caller's context.
836 static void
837 dbuf_evict_notify(uint64_t size)
840 * We check if we should evict without holding the dbuf_evict_lock,
841 * because it's OK to occasionally make the wrong decision here,
842 * and grabbing the lock results in massive lock contention.
844 if (size > dbuf_cache_target_bytes()) {
845 if (size > dbuf_cache_hiwater_bytes())
846 dbuf_evict_one();
847 cv_signal(&dbuf_evict_cv);
851 static int
852 dbuf_kstat_update(kstat_t *ksp, int rw)
854 dbuf_stats_t *ds = ksp->ks_data;
855 dbuf_hash_table_t *h = &dbuf_hash_table;
857 if (rw == KSTAT_WRITE)
858 return (SET_ERROR(EACCES));
860 ds->cache_count.value.ui64 =
861 wmsum_value(&dbuf_sums.cache_count);
862 ds->cache_size_bytes.value.ui64 =
863 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
864 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
865 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
866 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
867 ds->cache_total_evicts.value.ui64 =
868 wmsum_value(&dbuf_sums.cache_total_evicts);
869 for (int i = 0; i < DN_MAX_LEVELS; i++) {
870 ds->cache_levels[i].value.ui64 =
871 wmsum_value(&dbuf_sums.cache_levels[i]);
872 ds->cache_levels_bytes[i].value.ui64 =
873 wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
875 ds->hash_hits.value.ui64 =
876 wmsum_value(&dbuf_sums.hash_hits);
877 ds->hash_misses.value.ui64 =
878 wmsum_value(&dbuf_sums.hash_misses);
879 ds->hash_collisions.value.ui64 =
880 wmsum_value(&dbuf_sums.hash_collisions);
881 ds->hash_chains.value.ui64 =
882 wmsum_value(&dbuf_sums.hash_chains);
883 ds->hash_insert_race.value.ui64 =
884 wmsum_value(&dbuf_sums.hash_insert_race);
885 ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
886 ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
887 ds->metadata_cache_count.value.ui64 =
888 wmsum_value(&dbuf_sums.metadata_cache_count);
889 ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
890 &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
891 ds->metadata_cache_overflow.value.ui64 =
892 wmsum_value(&dbuf_sums.metadata_cache_overflow);
893 return (0);
896 void
897 dbuf_init(void)
899 uint64_t hmsize, hsize = 1ULL << 16;
900 dbuf_hash_table_t *h = &dbuf_hash_table;
903 * The hash table is big enough to fill one eighth of physical memory
904 * with an average block size of zfs_arc_average_blocksize (default 8K).
905 * By default, the table will take up
906 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
908 while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
909 hsize <<= 1;
911 h->hash_table = NULL;
912 while (h->hash_table == NULL) {
913 h->hash_table_mask = hsize - 1;
915 h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
916 if (h->hash_table == NULL)
917 hsize >>= 1;
919 ASSERT3U(hsize, >=, 1ULL << 10);
923 * The hash table buckets are protected by an array of mutexes where
924 * each mutex is reponsible for protecting 128 buckets. A minimum
925 * array size of 8192 is targeted to avoid contention.
927 if (dbuf_mutex_cache_shift == 0)
928 hmsize = MAX(hsize >> 7, 1ULL << 13);
929 else
930 hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
932 h->hash_mutexes = NULL;
933 while (h->hash_mutexes == NULL) {
934 h->hash_mutex_mask = hmsize - 1;
936 h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
937 KM_SLEEP);
938 if (h->hash_mutexes == NULL)
939 hmsize >>= 1;
942 dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
943 sizeof (dmu_buf_impl_t),
944 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
946 for (int i = 0; i < hmsize; i++)
947 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
949 dbuf_stats_init(h);
952 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
953 * configuration is not required.
955 dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
957 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
958 multilist_create(&dbuf_caches[dcs].cache,
959 sizeof (dmu_buf_impl_t),
960 offsetof(dmu_buf_impl_t, db_cache_link),
961 dbuf_cache_multilist_index_func);
962 zfs_refcount_create(&dbuf_caches[dcs].size);
965 dbuf_evict_thread_exit = B_FALSE;
966 mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
967 cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
968 dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
969 NULL, 0, &p0, TS_RUN, minclsyspri);
971 wmsum_init(&dbuf_sums.cache_count, 0);
972 wmsum_init(&dbuf_sums.cache_total_evicts, 0);
973 for (int i = 0; i < DN_MAX_LEVELS; i++) {
974 wmsum_init(&dbuf_sums.cache_levels[i], 0);
975 wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
977 wmsum_init(&dbuf_sums.hash_hits, 0);
978 wmsum_init(&dbuf_sums.hash_misses, 0);
979 wmsum_init(&dbuf_sums.hash_collisions, 0);
980 wmsum_init(&dbuf_sums.hash_chains, 0);
981 wmsum_init(&dbuf_sums.hash_insert_race, 0);
982 wmsum_init(&dbuf_sums.metadata_cache_count, 0);
983 wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
985 dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
986 KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
987 KSTAT_FLAG_VIRTUAL);
988 if (dbuf_ksp != NULL) {
989 for (int i = 0; i < DN_MAX_LEVELS; i++) {
990 snprintf(dbuf_stats.cache_levels[i].name,
991 KSTAT_STRLEN, "cache_level_%d", i);
992 dbuf_stats.cache_levels[i].data_type =
993 KSTAT_DATA_UINT64;
994 snprintf(dbuf_stats.cache_levels_bytes[i].name,
995 KSTAT_STRLEN, "cache_level_%d_bytes", i);
996 dbuf_stats.cache_levels_bytes[i].data_type =
997 KSTAT_DATA_UINT64;
999 dbuf_ksp->ks_data = &dbuf_stats;
1000 dbuf_ksp->ks_update = dbuf_kstat_update;
1001 kstat_install(dbuf_ksp);
1005 void
1006 dbuf_fini(void)
1008 dbuf_hash_table_t *h = &dbuf_hash_table;
1010 dbuf_stats_destroy();
1012 for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
1013 mutex_destroy(&h->hash_mutexes[i]);
1015 vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
1016 vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
1017 sizeof (kmutex_t));
1019 kmem_cache_destroy(dbuf_kmem_cache);
1020 taskq_destroy(dbu_evict_taskq);
1022 mutex_enter(&dbuf_evict_lock);
1023 dbuf_evict_thread_exit = B_TRUE;
1024 while (dbuf_evict_thread_exit) {
1025 cv_signal(&dbuf_evict_cv);
1026 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
1028 mutex_exit(&dbuf_evict_lock);
1030 mutex_destroy(&dbuf_evict_lock);
1031 cv_destroy(&dbuf_evict_cv);
1033 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
1034 zfs_refcount_destroy(&dbuf_caches[dcs].size);
1035 multilist_destroy(&dbuf_caches[dcs].cache);
1038 if (dbuf_ksp != NULL) {
1039 kstat_delete(dbuf_ksp);
1040 dbuf_ksp = NULL;
1043 wmsum_fini(&dbuf_sums.cache_count);
1044 wmsum_fini(&dbuf_sums.cache_total_evicts);
1045 for (int i = 0; i < DN_MAX_LEVELS; i++) {
1046 wmsum_fini(&dbuf_sums.cache_levels[i]);
1047 wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
1049 wmsum_fini(&dbuf_sums.hash_hits);
1050 wmsum_fini(&dbuf_sums.hash_misses);
1051 wmsum_fini(&dbuf_sums.hash_collisions);
1052 wmsum_fini(&dbuf_sums.hash_chains);
1053 wmsum_fini(&dbuf_sums.hash_insert_race);
1054 wmsum_fini(&dbuf_sums.metadata_cache_count);
1055 wmsum_fini(&dbuf_sums.metadata_cache_overflow);
1059 * Other stuff.
1062 #ifdef ZFS_DEBUG
1063 static void
1064 dbuf_verify(dmu_buf_impl_t *db)
1066 dnode_t *dn;
1067 dbuf_dirty_record_t *dr;
1068 uint32_t txg_prev;
1070 ASSERT(MUTEX_HELD(&db->db_mtx));
1072 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
1073 return;
1075 ASSERT(db->db_objset != NULL);
1076 DB_DNODE_ENTER(db);
1077 dn = DB_DNODE(db);
1078 if (dn == NULL) {
1079 ASSERT(db->db_parent == NULL);
1080 ASSERT(db->db_blkptr == NULL);
1081 } else {
1082 ASSERT3U(db->db.db_object, ==, dn->dn_object);
1083 ASSERT3P(db->db_objset, ==, dn->dn_objset);
1084 ASSERT3U(db->db_level, <, dn->dn_nlevels);
1085 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
1086 db->db_blkid == DMU_SPILL_BLKID ||
1087 !avl_is_empty(&dn->dn_dbufs));
1089 if (db->db_blkid == DMU_BONUS_BLKID) {
1090 ASSERT(dn != NULL);
1091 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1092 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
1093 } else if (db->db_blkid == DMU_SPILL_BLKID) {
1094 ASSERT(dn != NULL);
1095 ASSERT0(db->db.db_offset);
1096 } else {
1097 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
1100 if ((dr = list_head(&db->db_dirty_records)) != NULL) {
1101 ASSERT(dr->dr_dbuf == db);
1102 txg_prev = dr->dr_txg;
1103 for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
1104 dr = list_next(&db->db_dirty_records, dr)) {
1105 ASSERT(dr->dr_dbuf == db);
1106 ASSERT(txg_prev > dr->dr_txg);
1107 txg_prev = dr->dr_txg;
1112 * We can't assert that db_size matches dn_datablksz because it
1113 * can be momentarily different when another thread is doing
1114 * dnode_set_blksz().
1116 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
1117 dr = db->db_data_pending;
1119 * It should only be modified in syncing context, so
1120 * make sure we only have one copy of the data.
1122 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
1125 /* verify db->db_blkptr */
1126 if (db->db_blkptr) {
1127 if (db->db_parent == dn->dn_dbuf) {
1128 /* db is pointed to by the dnode */
1129 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1130 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1131 ASSERT(db->db_parent == NULL);
1132 else
1133 ASSERT(db->db_parent != NULL);
1134 if (db->db_blkid != DMU_SPILL_BLKID)
1135 ASSERT3P(db->db_blkptr, ==,
1136 &dn->dn_phys->dn_blkptr[db->db_blkid]);
1137 } else {
1138 /* db is pointed to by an indirect block */
1139 int epb __maybe_unused = db->db_parent->db.db_size >>
1140 SPA_BLKPTRSHIFT;
1141 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1142 ASSERT3U(db->db_parent->db.db_object, ==,
1143 db->db.db_object);
1145 * dnode_grow_indblksz() can make this fail if we don't
1146 * have the parent's rwlock. XXX indblksz no longer
1147 * grows. safe to do this now?
1149 if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1150 ASSERT3P(db->db_blkptr, ==,
1151 ((blkptr_t *)db->db_parent->db.db_data +
1152 db->db_blkid % epb));
1156 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1157 (db->db_buf == NULL || db->db_buf->b_data) &&
1158 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1159 db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
1161 * If the blkptr isn't set but they have nonzero data,
1162 * it had better be dirty, otherwise we'll lose that
1163 * data when we evict this buffer.
1165 * There is an exception to this rule for indirect blocks; in
1166 * this case, if the indirect block is a hole, we fill in a few
1167 * fields on each of the child blocks (importantly, birth time)
1168 * to prevent hole birth times from being lost when you
1169 * partially fill in a hole.
1171 if (db->db_dirtycnt == 0) {
1172 if (db->db_level == 0) {
1173 uint64_t *buf = db->db.db_data;
1174 int i;
1176 for (i = 0; i < db->db.db_size >> 3; i++) {
1177 ASSERT(buf[i] == 0);
1179 } else {
1180 blkptr_t *bps = db->db.db_data;
1181 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1182 db->db.db_size);
1184 * We want to verify that all the blkptrs in the
1185 * indirect block are holes, but we may have
1186 * automatically set up a few fields for them.
1187 * We iterate through each blkptr and verify
1188 * they only have those fields set.
1190 for (int i = 0;
1191 i < db->db.db_size / sizeof (blkptr_t);
1192 i++) {
1193 blkptr_t *bp = &bps[i];
1194 ASSERT(ZIO_CHECKSUM_IS_ZERO(
1195 &bp->blk_cksum));
1196 ASSERT(
1197 DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1198 DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1199 DVA_IS_EMPTY(&bp->blk_dva[2]));
1200 ASSERT0(bp->blk_fill);
1201 ASSERT0(bp->blk_pad[0]);
1202 ASSERT0(bp->blk_pad[1]);
1203 ASSERT(!BP_IS_EMBEDDED(bp));
1204 ASSERT(BP_IS_HOLE(bp));
1205 ASSERT0(bp->blk_phys_birth);
1210 DB_DNODE_EXIT(db);
1212 #endif
1214 static void
1215 dbuf_clear_data(dmu_buf_impl_t *db)
1217 ASSERT(MUTEX_HELD(&db->db_mtx));
1218 dbuf_evict_user(db);
1219 ASSERT3P(db->db_buf, ==, NULL);
1220 db->db.db_data = NULL;
1221 if (db->db_state != DB_NOFILL) {
1222 db->db_state = DB_UNCACHED;
1223 DTRACE_SET_STATE(db, "clear data");
1227 static void
1228 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1230 ASSERT(MUTEX_HELD(&db->db_mtx));
1231 ASSERT(buf != NULL);
1233 db->db_buf = buf;
1234 ASSERT(buf->b_data != NULL);
1235 db->db.db_data = buf->b_data;
1238 static arc_buf_t *
1239 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1241 spa_t *spa = db->db_objset->os_spa;
1243 return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1247 * Loan out an arc_buf for read. Return the loaned arc_buf.
1249 arc_buf_t *
1250 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1252 arc_buf_t *abuf;
1254 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1255 mutex_enter(&db->db_mtx);
1256 if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1257 int blksz = db->db.db_size;
1258 spa_t *spa = db->db_objset->os_spa;
1260 mutex_exit(&db->db_mtx);
1261 abuf = arc_loan_buf(spa, B_FALSE, blksz);
1262 memcpy(abuf->b_data, db->db.db_data, blksz);
1263 } else {
1264 abuf = db->db_buf;
1265 arc_loan_inuse_buf(abuf, db);
1266 db->db_buf = NULL;
1267 dbuf_clear_data(db);
1268 mutex_exit(&db->db_mtx);
1270 return (abuf);
1274 * Calculate which level n block references the data at the level 0 offset
1275 * provided.
1277 uint64_t
1278 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1280 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1282 * The level n blkid is equal to the level 0 blkid divided by
1283 * the number of level 0s in a level n block.
1285 * The level 0 blkid is offset >> datablkshift =
1286 * offset / 2^datablkshift.
1288 * The number of level 0s in a level n is the number of block
1289 * pointers in an indirect block, raised to the power of level.
1290 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1291 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1293 * Thus, the level n blkid is: offset /
1294 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1295 * = offset / 2^(datablkshift + level *
1296 * (indblkshift - SPA_BLKPTRSHIFT))
1297 * = offset >> (datablkshift + level *
1298 * (indblkshift - SPA_BLKPTRSHIFT))
1301 const unsigned exp = dn->dn_datablkshift +
1302 level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1304 if (exp >= 8 * sizeof (offset)) {
1305 /* This only happens on the highest indirection level */
1306 ASSERT3U(level, ==, dn->dn_nlevels - 1);
1307 return (0);
1310 ASSERT3U(exp, <, 8 * sizeof (offset));
1312 return (offset >> exp);
1313 } else {
1314 ASSERT3U(offset, <, dn->dn_datablksz);
1315 return (0);
1320 * This function is used to lock the parent of the provided dbuf. This should be
1321 * used when modifying or reading db_blkptr.
1323 db_lock_type_t
1324 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
1326 enum db_lock_type ret = DLT_NONE;
1327 if (db->db_parent != NULL) {
1328 rw_enter(&db->db_parent->db_rwlock, rw);
1329 ret = DLT_PARENT;
1330 } else if (dmu_objset_ds(db->db_objset) != NULL) {
1331 rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1332 tag);
1333 ret = DLT_OBJSET;
1336 * We only return a DLT_NONE lock when it's the top-most indirect block
1337 * of the meta-dnode of the MOS.
1339 return (ret);
1343 * We need to pass the lock type in because it's possible that the block will
1344 * move from being the topmost indirect block in a dnode (and thus, have no
1345 * parent) to not the top-most via an indirection increase. This would cause a
1346 * panic if we didn't pass the lock type in.
1348 void
1349 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
1351 if (type == DLT_PARENT)
1352 rw_exit(&db->db_parent->db_rwlock);
1353 else if (type == DLT_OBJSET)
1354 rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1357 static void
1358 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1359 arc_buf_t *buf, void *vdb)
1361 (void) zb, (void) bp;
1362 dmu_buf_impl_t *db = vdb;
1364 mutex_enter(&db->db_mtx);
1365 ASSERT3U(db->db_state, ==, DB_READ);
1367 * All reads are synchronous, so we must have a hold on the dbuf
1369 ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1370 ASSERT(db->db_buf == NULL);
1371 ASSERT(db->db.db_data == NULL);
1372 if (buf == NULL) {
1373 /* i/o error */
1374 ASSERT(zio == NULL || zio->io_error != 0);
1375 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1376 ASSERT3P(db->db_buf, ==, NULL);
1377 db->db_state = DB_UNCACHED;
1378 DTRACE_SET_STATE(db, "i/o error");
1379 } else if (db->db_level == 0 && db->db_freed_in_flight) {
1380 /* freed in flight */
1381 ASSERT(zio == NULL || zio->io_error == 0);
1382 arc_release(buf, db);
1383 memset(buf->b_data, 0, db->db.db_size);
1384 arc_buf_freeze(buf);
1385 db->db_freed_in_flight = FALSE;
1386 dbuf_set_data(db, buf);
1387 db->db_state = DB_CACHED;
1388 DTRACE_SET_STATE(db, "freed in flight");
1389 } else {
1390 /* success */
1391 ASSERT(zio == NULL || zio->io_error == 0);
1392 dbuf_set_data(db, buf);
1393 db->db_state = DB_CACHED;
1394 DTRACE_SET_STATE(db, "successful read");
1396 cv_broadcast(&db->db_changed);
1397 dbuf_rele_and_unlock(db, NULL, B_FALSE);
1401 * Shortcut for performing reads on bonus dbufs. Returns
1402 * an error if we fail to verify the dnode associated with
1403 * a decrypted block. Otherwise success.
1405 static int
1406 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1408 int bonuslen, max_bonuslen, err;
1410 err = dbuf_read_verify_dnode_crypt(db, flags);
1411 if (err)
1412 return (err);
1414 bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1415 max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1416 ASSERT(MUTEX_HELD(&db->db_mtx));
1417 ASSERT(DB_DNODE_HELD(db));
1418 ASSERT3U(bonuslen, <=, db->db.db_size);
1419 db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1420 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1421 if (bonuslen < max_bonuslen)
1422 memset(db->db.db_data, 0, max_bonuslen);
1423 if (bonuslen)
1424 memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
1425 db->db_state = DB_CACHED;
1426 DTRACE_SET_STATE(db, "bonus buffer filled");
1427 return (0);
1430 static void
1431 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
1433 blkptr_t *bps = db->db.db_data;
1434 uint32_t indbs = 1ULL << dn->dn_indblkshift;
1435 int n_bps = indbs >> SPA_BLKPTRSHIFT;
1437 for (int i = 0; i < n_bps; i++) {
1438 blkptr_t *bp = &bps[i];
1440 ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
1441 BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
1442 dn->dn_datablksz : BP_GET_LSIZE(dbbp));
1443 BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
1444 BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
1445 BP_SET_BIRTH(bp, dbbp->blk_birth, 0);
1450 * Handle reads on dbufs that are holes, if necessary. This function
1451 * requires that the dbuf's mutex is held. Returns success (0) if action
1452 * was taken, ENOENT if no action was taken.
1454 static int
1455 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
1457 ASSERT(MUTEX_HELD(&db->db_mtx));
1459 int is_hole = bp == NULL || BP_IS_HOLE(bp);
1461 * For level 0 blocks only, if the above check fails:
1462 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1463 * processes the delete record and clears the bp while we are waiting
1464 * for the dn_mtx (resulting in a "no" from block_freed).
1466 if (!is_hole && db->db_level == 0)
1467 is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
1469 if (is_hole) {
1470 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1471 memset(db->db.db_data, 0, db->db.db_size);
1473 if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
1474 bp->blk_birth != 0) {
1475 dbuf_handle_indirect_hole(db, dn, bp);
1477 db->db_state = DB_CACHED;
1478 DTRACE_SET_STATE(db, "hole read satisfied");
1479 return (0);
1481 return (ENOENT);
1485 * This function ensures that, when doing a decrypting read of a block,
1486 * we make sure we have decrypted the dnode associated with it. We must do
1487 * this so that we ensure we are fully authenticating the checksum-of-MACs
1488 * tree from the root of the objset down to this block. Indirect blocks are
1489 * always verified against their secure checksum-of-MACs assuming that the
1490 * dnode containing them is correct. Now that we are doing a decrypting read,
1491 * we can be sure that the key is loaded and verify that assumption. This is
1492 * especially important considering that we always read encrypted dnode
1493 * blocks as raw data (without verifying their MACs) to start, and
1494 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1496 static int
1497 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1499 int err = 0;
1500 objset_t *os = db->db_objset;
1501 arc_buf_t *dnode_abuf;
1502 dnode_t *dn;
1503 zbookmark_phys_t zb;
1505 ASSERT(MUTEX_HELD(&db->db_mtx));
1507 if ((flags & DB_RF_NO_DECRYPT) != 0 ||
1508 !os->os_encrypted || os->os_raw_receive)
1509 return (0);
1511 DB_DNODE_ENTER(db);
1512 dn = DB_DNODE(db);
1513 dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1515 if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1516 DB_DNODE_EXIT(db);
1517 return (0);
1520 SET_BOOKMARK(&zb, dmu_objset_id(os),
1521 DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1522 err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1525 * An error code of EACCES tells us that the key is still not
1526 * available. This is ok if we are only reading authenticated
1527 * (and therefore non-encrypted) blocks.
1529 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1530 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1531 (db->db_blkid == DMU_BONUS_BLKID &&
1532 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1533 err = 0;
1535 DB_DNODE_EXIT(db);
1537 return (err);
1541 * Drops db_mtx and the parent lock specified by dblt and tag before
1542 * returning.
1544 static int
1545 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1546 db_lock_type_t dblt, const void *tag)
1548 dnode_t *dn;
1549 zbookmark_phys_t zb;
1550 uint32_t aflags = ARC_FLAG_NOWAIT;
1551 int err, zio_flags;
1552 blkptr_t bp, *bpp;
1554 DB_DNODE_ENTER(db);
1555 dn = DB_DNODE(db);
1556 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1557 ASSERT(MUTEX_HELD(&db->db_mtx));
1558 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1559 ASSERT(db->db_buf == NULL);
1560 ASSERT(db->db_parent == NULL ||
1561 RW_LOCK_HELD(&db->db_parent->db_rwlock));
1563 if (db->db_blkid == DMU_BONUS_BLKID) {
1564 err = dbuf_read_bonus(db, dn, flags);
1565 goto early_unlock;
1568 if (db->db_state == DB_UNCACHED) {
1569 if (db->db_blkptr == NULL) {
1570 bpp = NULL;
1571 } else {
1572 bp = *db->db_blkptr;
1573 bpp = &bp;
1575 } else {
1576 dbuf_dirty_record_t *dr;
1578 ASSERT3S(db->db_state, ==, DB_NOFILL);
1581 * Block cloning: If we have a pending block clone,
1582 * we don't want to read the underlying block, but the content
1583 * of the block being cloned, so we have the most recent data.
1585 dr = list_head(&db->db_dirty_records);
1586 if (dr == NULL || !dr->dt.dl.dr_brtwrite) {
1587 err = EIO;
1588 goto early_unlock;
1590 bp = dr->dt.dl.dr_overridden_by;
1591 bpp = &bp;
1594 err = dbuf_read_hole(db, dn, bpp);
1595 if (err == 0)
1596 goto early_unlock;
1598 ASSERT(bpp != NULL);
1601 * Any attempt to read a redacted block should result in an error. This
1602 * will never happen under normal conditions, but can be useful for
1603 * debugging purposes.
1605 if (BP_IS_REDACTED(bpp)) {
1606 ASSERT(dsl_dataset_feature_is_active(
1607 db->db_objset->os_dsl_dataset,
1608 SPA_FEATURE_REDACTED_DATASETS));
1609 err = SET_ERROR(EIO);
1610 goto early_unlock;
1613 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1614 db->db.db_object, db->db_level, db->db_blkid);
1617 * All bps of an encrypted os should have the encryption bit set.
1618 * If this is not true it indicates tampering and we report an error.
1620 if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
1621 spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth);
1622 zfs_panic_recover("unencrypted block in encrypted "
1623 "object set %llu", dmu_objset_id(db->db_objset));
1624 err = SET_ERROR(EIO);
1625 goto early_unlock;
1628 err = dbuf_read_verify_dnode_crypt(db, flags);
1629 if (err != 0)
1630 goto early_unlock;
1632 DB_DNODE_EXIT(db);
1634 db->db_state = DB_READ;
1635 DTRACE_SET_STATE(db, "read issued");
1636 mutex_exit(&db->db_mtx);
1638 if (!DBUF_IS_CACHEABLE(db))
1639 aflags |= ARC_FLAG_UNCACHED;
1640 else if (dbuf_is_l2cacheable(db))
1641 aflags |= ARC_FLAG_L2CACHE;
1643 dbuf_add_ref(db, NULL);
1645 zio_flags = (flags & DB_RF_CANFAIL) ?
1646 ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1648 if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1649 zio_flags |= ZIO_FLAG_RAW;
1651 * The zio layer will copy the provided blkptr later, but we have our
1652 * own copy so that we can release the parent's rwlock. We have to
1653 * do that so that if dbuf_read_done is called synchronously (on
1654 * an l1 cache hit) we don't acquire the db_mtx while holding the
1655 * parent's rwlock, which would be a lock ordering violation.
1657 dmu_buf_unlock_parent(db, dblt, tag);
1658 (void) arc_read(zio, db->db_objset->os_spa, bpp,
1659 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1660 &aflags, &zb);
1661 return (err);
1662 early_unlock:
1663 DB_DNODE_EXIT(db);
1664 mutex_exit(&db->db_mtx);
1665 dmu_buf_unlock_parent(db, dblt, tag);
1666 return (err);
1670 * This is our just-in-time copy function. It makes a copy of buffers that
1671 * have been modified in a previous transaction group before we access them in
1672 * the current active group.
1674 * This function is used in three places: when we are dirtying a buffer for the
1675 * first time in a txg, when we are freeing a range in a dnode that includes
1676 * this buffer, and when we are accessing a buffer which was received compressed
1677 * and later referenced in a WRITE_BYREF record.
1679 * Note that when we are called from dbuf_free_range() we do not put a hold on
1680 * the buffer, we just traverse the active dbuf list for the dnode.
1682 static void
1683 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1685 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1687 ASSERT(MUTEX_HELD(&db->db_mtx));
1688 ASSERT(db->db.db_data != NULL);
1689 ASSERT(db->db_level == 0);
1690 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1692 if (dr == NULL ||
1693 (dr->dt.dl.dr_data !=
1694 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1695 return;
1698 * If the last dirty record for this dbuf has not yet synced
1699 * and its referencing the dbuf data, either:
1700 * reset the reference to point to a new copy,
1701 * or (if there a no active holders)
1702 * just null out the current db_data pointer.
1704 ASSERT3U(dr->dr_txg, >=, txg - 2);
1705 if (db->db_blkid == DMU_BONUS_BLKID) {
1706 dnode_t *dn = DB_DNODE(db);
1707 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1708 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1709 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1710 memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1711 } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1712 dnode_t *dn = DB_DNODE(db);
1713 int size = arc_buf_size(db->db_buf);
1714 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1715 spa_t *spa = db->db_objset->os_spa;
1716 enum zio_compress compress_type =
1717 arc_get_compression(db->db_buf);
1718 uint8_t complevel = arc_get_complevel(db->db_buf);
1720 if (arc_is_encrypted(db->db_buf)) {
1721 boolean_t byteorder;
1722 uint8_t salt[ZIO_DATA_SALT_LEN];
1723 uint8_t iv[ZIO_DATA_IV_LEN];
1724 uint8_t mac[ZIO_DATA_MAC_LEN];
1726 arc_get_raw_params(db->db_buf, &byteorder, salt,
1727 iv, mac);
1728 dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1729 dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1730 mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1731 compress_type, complevel);
1732 } else if (compress_type != ZIO_COMPRESS_OFF) {
1733 ASSERT3U(type, ==, ARC_BUFC_DATA);
1734 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1735 size, arc_buf_lsize(db->db_buf), compress_type,
1736 complevel);
1737 } else {
1738 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1740 memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1741 } else {
1742 db->db_buf = NULL;
1743 dbuf_clear_data(db);
1748 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1750 int err = 0;
1751 boolean_t prefetch;
1752 dnode_t *dn;
1755 * We don't have to hold the mutex to check db_state because it
1756 * can't be freed while we have a hold on the buffer.
1758 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1760 DB_DNODE_ENTER(db);
1761 dn = DB_DNODE(db);
1763 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1764 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;
1766 mutex_enter(&db->db_mtx);
1767 if (flags & DB_RF_PARTIAL_FIRST)
1768 db->db_partial_read = B_TRUE;
1769 else if (!(flags & DB_RF_PARTIAL_MORE))
1770 db->db_partial_read = B_FALSE;
1771 if (db->db_state == DB_CACHED) {
1773 * Ensure that this block's dnode has been decrypted if
1774 * the caller has requested decrypted data.
1776 err = dbuf_read_verify_dnode_crypt(db, flags);
1779 * If the arc buf is compressed or encrypted and the caller
1780 * requested uncompressed data, we need to untransform it
1781 * before returning. We also call arc_untransform() on any
1782 * unauthenticated blocks, which will verify their MAC if
1783 * the key is now available.
1785 if (err == 0 && db->db_buf != NULL &&
1786 (flags & DB_RF_NO_DECRYPT) == 0 &&
1787 (arc_is_encrypted(db->db_buf) ||
1788 arc_is_unauthenticated(db->db_buf) ||
1789 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1790 spa_t *spa = dn->dn_objset->os_spa;
1791 zbookmark_phys_t zb;
1793 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1794 db->db.db_object, db->db_level, db->db_blkid);
1795 dbuf_fix_old_data(db, spa_syncing_txg(spa));
1796 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1797 dbuf_set_data(db, db->db_buf);
1799 mutex_exit(&db->db_mtx);
1800 if (err == 0 && prefetch) {
1801 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1802 B_FALSE, flags & DB_RF_HAVESTRUCT);
1804 DB_DNODE_EXIT(db);
1805 DBUF_STAT_BUMP(hash_hits);
1806 } else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) {
1807 boolean_t need_wait = B_FALSE;
1809 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1811 if (zio == NULL && (db->db_state == DB_NOFILL ||
1812 (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
1813 spa_t *spa = dn->dn_objset->os_spa;
1814 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1815 need_wait = B_TRUE;
1817 err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1819 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1820 * for us
1822 if (!err && prefetch) {
1823 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1824 db->db_state != DB_CACHED,
1825 flags & DB_RF_HAVESTRUCT);
1828 DB_DNODE_EXIT(db);
1829 DBUF_STAT_BUMP(hash_misses);
1832 * If we created a zio_root we must execute it to avoid
1833 * leaking it, even if it isn't attached to any work due
1834 * to an error in dbuf_read_impl().
1836 if (need_wait) {
1837 if (err == 0)
1838 err = zio_wait(zio);
1839 else
1840 VERIFY0(zio_wait(zio));
1842 } else {
1844 * Another reader came in while the dbuf was in flight
1845 * between UNCACHED and CACHED. Either a writer will finish
1846 * writing the buffer (sending the dbuf to CACHED) or the
1847 * first reader's request will reach the read_done callback
1848 * and send the dbuf to CACHED. Otherwise, a failure
1849 * occurred and the dbuf went to UNCACHED.
1851 mutex_exit(&db->db_mtx);
1852 if (prefetch) {
1853 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1854 B_TRUE, flags & DB_RF_HAVESTRUCT);
1856 DB_DNODE_EXIT(db);
1857 DBUF_STAT_BUMP(hash_misses);
1859 /* Skip the wait per the caller's request. */
1860 if ((flags & DB_RF_NEVERWAIT) == 0) {
1861 mutex_enter(&db->db_mtx);
1862 while (db->db_state == DB_READ ||
1863 db->db_state == DB_FILL) {
1864 ASSERT(db->db_state == DB_READ ||
1865 (flags & DB_RF_HAVESTRUCT) == 0);
1866 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1867 db, zio_t *, zio);
1868 cv_wait(&db->db_changed, &db->db_mtx);
1870 if (db->db_state == DB_UNCACHED)
1871 err = SET_ERROR(EIO);
1872 mutex_exit(&db->db_mtx);
1876 return (err);
1879 static void
1880 dbuf_noread(dmu_buf_impl_t *db)
1882 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1883 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1884 mutex_enter(&db->db_mtx);
1885 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1886 cv_wait(&db->db_changed, &db->db_mtx);
1887 if (db->db_state == DB_UNCACHED) {
1888 ASSERT(db->db_buf == NULL);
1889 ASSERT(db->db.db_data == NULL);
1890 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1891 db->db_state = DB_FILL;
1892 DTRACE_SET_STATE(db, "assigning filled buffer");
1893 } else if (db->db_state == DB_NOFILL) {
1894 dbuf_clear_data(db);
1895 } else {
1896 ASSERT3U(db->db_state, ==, DB_CACHED);
1898 mutex_exit(&db->db_mtx);
1901 void
1902 dbuf_unoverride(dbuf_dirty_record_t *dr)
1904 dmu_buf_impl_t *db = dr->dr_dbuf;
1905 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1906 uint64_t txg = dr->dr_txg;
1907 boolean_t release;
1909 ASSERT(MUTEX_HELD(&db->db_mtx));
1911 * This assert is valid because dmu_sync() expects to be called by
1912 * a zilog's get_data while holding a range lock. This call only
1913 * comes from dbuf_dirty() callers who must also hold a range lock.
1915 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1916 ASSERT(db->db_level == 0);
1918 if (db->db_blkid == DMU_BONUS_BLKID ||
1919 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1920 return;
1922 ASSERT(db->db_data_pending != dr);
1924 /* free this block */
1925 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1926 zio_free(db->db_objset->os_spa, txg, bp);
1928 release = !dr->dt.dl.dr_brtwrite;
1929 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1930 dr->dt.dl.dr_nopwrite = B_FALSE;
1931 dr->dt.dl.dr_brtwrite = B_FALSE;
1932 dr->dt.dl.dr_has_raw_params = B_FALSE;
1935 * Release the already-written buffer, so we leave it in
1936 * a consistent dirty state. Note that all callers are
1937 * modifying the buffer, so they will immediately do
1938 * another (redundant) arc_release(). Therefore, leave
1939 * the buf thawed to save the effort of freezing &
1940 * immediately re-thawing it.
1942 if (release)
1943 arc_release(dr->dt.dl.dr_data, db);
1947 * Evict (if its unreferenced) or clear (if its referenced) any level-0
1948 * data blocks in the free range, so that any future readers will find
1949 * empty blocks.
1951 void
1952 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1953 dmu_tx_t *tx)
1955 dmu_buf_impl_t *db_search;
1956 dmu_buf_impl_t *db, *db_next;
1957 uint64_t txg = tx->tx_txg;
1958 avl_index_t where;
1959 dbuf_dirty_record_t *dr;
1961 if (end_blkid > dn->dn_maxblkid &&
1962 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1963 end_blkid = dn->dn_maxblkid;
1964 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1965 (u_longlong_t)end_blkid);
1967 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1968 db_search->db_level = 0;
1969 db_search->db_blkid = start_blkid;
1970 db_search->db_state = DB_SEARCH;
1972 mutex_enter(&dn->dn_dbufs_mtx);
1973 db = avl_find(&dn->dn_dbufs, db_search, &where);
1974 ASSERT3P(db, ==, NULL);
1976 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1978 for (; db != NULL; db = db_next) {
1979 db_next = AVL_NEXT(&dn->dn_dbufs, db);
1980 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1982 if (db->db_level != 0 || db->db_blkid > end_blkid) {
1983 break;
1985 ASSERT3U(db->db_blkid, >=, start_blkid);
1987 /* found a level 0 buffer in the range */
1988 mutex_enter(&db->db_mtx);
1989 if (dbuf_undirty(db, tx)) {
1990 /* mutex has been dropped and dbuf destroyed */
1991 continue;
1994 if (db->db_state == DB_UNCACHED ||
1995 db->db_state == DB_NOFILL ||
1996 db->db_state == DB_EVICTING) {
1997 ASSERT(db->db.db_data == NULL);
1998 mutex_exit(&db->db_mtx);
1999 continue;
2001 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2002 /* will be handled in dbuf_read_done or dbuf_rele */
2003 db->db_freed_in_flight = TRUE;
2004 mutex_exit(&db->db_mtx);
2005 continue;
2007 if (zfs_refcount_count(&db->db_holds) == 0) {
2008 ASSERT(db->db_buf);
2009 dbuf_destroy(db);
2010 continue;
2012 /* The dbuf is referenced */
2014 dr = list_head(&db->db_dirty_records);
2015 if (dr != NULL) {
2016 if (dr->dr_txg == txg) {
2018 * This buffer is "in-use", re-adjust the file
2019 * size to reflect that this buffer may
2020 * contain new data when we sync.
2022 if (db->db_blkid != DMU_SPILL_BLKID &&
2023 db->db_blkid > dn->dn_maxblkid)
2024 dn->dn_maxblkid = db->db_blkid;
2025 dbuf_unoverride(dr);
2026 } else {
2028 * This dbuf is not dirty in the open context.
2029 * Either uncache it (if its not referenced in
2030 * the open context) or reset its contents to
2031 * empty.
2033 dbuf_fix_old_data(db, txg);
2036 /* clear the contents if its cached */
2037 if (db->db_state == DB_CACHED) {
2038 ASSERT(db->db.db_data != NULL);
2039 arc_release(db->db_buf, db);
2040 rw_enter(&db->db_rwlock, RW_WRITER);
2041 memset(db->db.db_data, 0, db->db.db_size);
2042 rw_exit(&db->db_rwlock);
2043 arc_buf_freeze(db->db_buf);
2046 mutex_exit(&db->db_mtx);
2049 mutex_exit(&dn->dn_dbufs_mtx);
2050 kmem_free(db_search, sizeof (dmu_buf_impl_t));
2053 void
2054 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2056 arc_buf_t *buf, *old_buf;
2057 dbuf_dirty_record_t *dr;
2058 int osize = db->db.db_size;
2059 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2060 dnode_t *dn;
2062 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2064 DB_DNODE_ENTER(db);
2065 dn = DB_DNODE(db);
2068 * XXX we should be doing a dbuf_read, checking the return
2069 * value and returning that up to our callers
2071 dmu_buf_will_dirty(&db->db, tx);
2073 /* create the data buffer for the new block */
2074 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2076 /* copy old block data to the new block */
2077 old_buf = db->db_buf;
2078 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2079 /* zero the remainder */
2080 if (size > osize)
2081 memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2083 mutex_enter(&db->db_mtx);
2084 dbuf_set_data(db, buf);
2085 arc_buf_destroy(old_buf, db);
2086 db->db.db_size = size;
2088 dr = list_head(&db->db_dirty_records);
2089 /* dirty record added by dmu_buf_will_dirty() */
2090 VERIFY(dr != NULL);
2091 if (db->db_level == 0)
2092 dr->dt.dl.dr_data = buf;
2093 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2094 ASSERT3U(dr->dr_accounted, ==, osize);
2095 dr->dr_accounted = size;
2096 mutex_exit(&db->db_mtx);
2098 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2099 DB_DNODE_EXIT(db);
2102 void
2103 dbuf_release_bp(dmu_buf_impl_t *db)
2105 objset_t *os __maybe_unused = db->db_objset;
2107 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2108 ASSERT(arc_released(os->os_phys_buf) ||
2109 list_link_active(&os->os_dsl_dataset->ds_synced_link));
2110 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2112 (void) arc_release(db->db_buf, db);
2116 * We already have a dirty record for this TXG, and we are being
2117 * dirtied again.
2119 static void
2120 dbuf_redirty(dbuf_dirty_record_t *dr)
2122 dmu_buf_impl_t *db = dr->dr_dbuf;
2124 ASSERT(MUTEX_HELD(&db->db_mtx));
2126 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2128 * If this buffer has already been written out,
2129 * we now need to reset its state.
2131 dbuf_unoverride(dr);
2132 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2133 db->db_state != DB_NOFILL) {
2134 /* Already released on initial dirty, so just thaw. */
2135 ASSERT(arc_released(db->db_buf));
2136 arc_buf_thaw(db->db_buf);
2141 dbuf_dirty_record_t *
2142 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2144 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2145 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2146 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2147 ASSERT(dn->dn_maxblkid >= blkid);
2149 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2150 list_link_init(&dr->dr_dirty_node);
2151 list_link_init(&dr->dr_dbuf_node);
2152 dr->dr_dnode = dn;
2153 dr->dr_txg = tx->tx_txg;
2154 dr->dt.dll.dr_blkid = blkid;
2155 dr->dr_accounted = dn->dn_datablksz;
2158 * There should not be any dbuf for the block that we're dirtying.
2159 * Otherwise the buffer contents could be inconsistent between the
2160 * dbuf and the lightweight dirty record.
2162 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2163 NULL));
2165 mutex_enter(&dn->dn_mtx);
2166 int txgoff = tx->tx_txg & TXG_MASK;
2167 if (dn->dn_free_ranges[txgoff] != NULL) {
2168 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2171 if (dn->dn_nlevels == 1) {
2172 ASSERT3U(blkid, <, dn->dn_nblkptr);
2173 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2174 mutex_exit(&dn->dn_mtx);
2175 rw_exit(&dn->dn_struct_rwlock);
2176 dnode_setdirty(dn, tx);
2177 } else {
2178 mutex_exit(&dn->dn_mtx);
2180 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2181 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2182 1, blkid >> epbs, FTAG);
2183 rw_exit(&dn->dn_struct_rwlock);
2184 if (parent_db == NULL) {
2185 kmem_free(dr, sizeof (*dr));
2186 return (NULL);
2188 int err = dbuf_read(parent_db, NULL,
2189 (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2190 if (err != 0) {
2191 dbuf_rele(parent_db, FTAG);
2192 kmem_free(dr, sizeof (*dr));
2193 return (NULL);
2196 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2197 dbuf_rele(parent_db, FTAG);
2198 mutex_enter(&parent_dr->dt.di.dr_mtx);
2199 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2200 list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2201 mutex_exit(&parent_dr->dt.di.dr_mtx);
2202 dr->dr_parent = parent_dr;
2205 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2207 return (dr);
2210 dbuf_dirty_record_t *
2211 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2213 dnode_t *dn;
2214 objset_t *os;
2215 dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2216 int txgoff = tx->tx_txg & TXG_MASK;
2217 boolean_t drop_struct_rwlock = B_FALSE;
2219 ASSERT(tx->tx_txg != 0);
2220 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2221 DMU_TX_DIRTY_BUF(tx, db);
2223 DB_DNODE_ENTER(db);
2224 dn = DB_DNODE(db);
2226 * Shouldn't dirty a regular buffer in syncing context. Private
2227 * objects may be dirtied in syncing context, but only if they
2228 * were already pre-dirtied in open context.
2230 #ifdef ZFS_DEBUG
2231 if (dn->dn_objset->os_dsl_dataset != NULL) {
2232 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2233 RW_READER, FTAG);
2235 ASSERT(!dmu_tx_is_syncing(tx) ||
2236 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2237 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2238 dn->dn_objset->os_dsl_dataset == NULL);
2239 if (dn->dn_objset->os_dsl_dataset != NULL)
2240 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2241 #endif
2243 * We make this assert for private objects as well, but after we
2244 * check if we're already dirty. They are allowed to re-dirty
2245 * in syncing context.
2247 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2248 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2249 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2251 mutex_enter(&db->db_mtx);
2253 * XXX make this true for indirects too? The problem is that
2254 * transactions created with dmu_tx_create_assigned() from
2255 * syncing context don't bother holding ahead.
2257 ASSERT(db->db_level != 0 ||
2258 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2259 db->db_state == DB_NOFILL);
2261 mutex_enter(&dn->dn_mtx);
2262 dnode_set_dirtyctx(dn, tx, db);
2263 if (tx->tx_txg > dn->dn_dirty_txg)
2264 dn->dn_dirty_txg = tx->tx_txg;
2265 mutex_exit(&dn->dn_mtx);
2267 if (db->db_blkid == DMU_SPILL_BLKID)
2268 dn->dn_have_spill = B_TRUE;
2271 * If this buffer is already dirty, we're done.
2273 dr_head = list_head(&db->db_dirty_records);
2274 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2275 db->db.db_object == DMU_META_DNODE_OBJECT);
2276 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2277 if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2278 DB_DNODE_EXIT(db);
2280 dbuf_redirty(dr_next);
2281 mutex_exit(&db->db_mtx);
2282 return (dr_next);
2286 * Only valid if not already dirty.
2288 ASSERT(dn->dn_object == 0 ||
2289 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2290 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2292 ASSERT3U(dn->dn_nlevels, >, db->db_level);
2295 * We should only be dirtying in syncing context if it's the
2296 * mos or we're initializing the os or it's a special object.
2297 * However, we are allowed to dirty in syncing context provided
2298 * we already dirtied it in open context. Hence we must make
2299 * this assertion only if we're not already dirty.
2301 os = dn->dn_objset;
2302 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2303 #ifdef ZFS_DEBUG
2304 if (dn->dn_objset->os_dsl_dataset != NULL)
2305 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2306 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2307 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2308 if (dn->dn_objset->os_dsl_dataset != NULL)
2309 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2310 #endif
2311 ASSERT(db->db.db_size != 0);
2313 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2315 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2316 dmu_objset_willuse_space(os, db->db.db_size, tx);
2320 * If this buffer is dirty in an old transaction group we need
2321 * to make a copy of it so that the changes we make in this
2322 * transaction group won't leak out when we sync the older txg.
2324 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2325 list_link_init(&dr->dr_dirty_node);
2326 list_link_init(&dr->dr_dbuf_node);
2327 dr->dr_dnode = dn;
2328 if (db->db_level == 0) {
2329 void *data_old = db->db_buf;
2331 if (db->db_state != DB_NOFILL) {
2332 if (db->db_blkid == DMU_BONUS_BLKID) {
2333 dbuf_fix_old_data(db, tx->tx_txg);
2334 data_old = db->db.db_data;
2335 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2337 * Release the data buffer from the cache so
2338 * that we can modify it without impacting
2339 * possible other users of this cached data
2340 * block. Note that indirect blocks and
2341 * private objects are not released until the
2342 * syncing state (since they are only modified
2343 * then).
2345 arc_release(db->db_buf, db);
2346 dbuf_fix_old_data(db, tx->tx_txg);
2347 data_old = db->db_buf;
2349 ASSERT(data_old != NULL);
2351 dr->dt.dl.dr_data = data_old;
2352 } else {
2353 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2354 list_create(&dr->dt.di.dr_children,
2355 sizeof (dbuf_dirty_record_t),
2356 offsetof(dbuf_dirty_record_t, dr_dirty_node));
2358 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2359 dr->dr_accounted = db->db.db_size;
2361 dr->dr_dbuf = db;
2362 dr->dr_txg = tx->tx_txg;
2363 list_insert_before(&db->db_dirty_records, dr_next, dr);
2366 * We could have been freed_in_flight between the dbuf_noread
2367 * and dbuf_dirty. We win, as though the dbuf_noread() had
2368 * happened after the free.
2370 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2371 db->db_blkid != DMU_SPILL_BLKID) {
2372 mutex_enter(&dn->dn_mtx);
2373 if (dn->dn_free_ranges[txgoff] != NULL) {
2374 range_tree_clear(dn->dn_free_ranges[txgoff],
2375 db->db_blkid, 1);
2377 mutex_exit(&dn->dn_mtx);
2378 db->db_freed_in_flight = FALSE;
2382 * This buffer is now part of this txg
2384 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2385 db->db_dirtycnt += 1;
2386 ASSERT3U(db->db_dirtycnt, <=, 3);
2388 mutex_exit(&db->db_mtx);
2390 if (db->db_blkid == DMU_BONUS_BLKID ||
2391 db->db_blkid == DMU_SPILL_BLKID) {
2392 mutex_enter(&dn->dn_mtx);
2393 ASSERT(!list_link_active(&dr->dr_dirty_node));
2394 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2395 mutex_exit(&dn->dn_mtx);
2396 dnode_setdirty(dn, tx);
2397 DB_DNODE_EXIT(db);
2398 return (dr);
2401 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2402 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2403 drop_struct_rwlock = B_TRUE;
2407 * If we are overwriting a dedup BP, then unless it is snapshotted,
2408 * when we get to syncing context we will need to decrement its
2409 * refcount in the DDT. Prefetch the relevant DDT block so that
2410 * syncing context won't have to wait for the i/o.
2412 if (db->db_blkptr != NULL) {
2413 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2414 ddt_prefetch(os->os_spa, db->db_blkptr);
2415 dmu_buf_unlock_parent(db, dblt, FTAG);
2419 * We need to hold the dn_struct_rwlock to make this assertion,
2420 * because it protects dn_phys / dn_next_nlevels from changing.
2422 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2423 dn->dn_phys->dn_nlevels > db->db_level ||
2424 dn->dn_next_nlevels[txgoff] > db->db_level ||
2425 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2426 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2429 if (db->db_level == 0) {
2430 ASSERT(!db->db_objset->os_raw_receive ||
2431 dn->dn_maxblkid >= db->db_blkid);
2432 dnode_new_blkid(dn, db->db_blkid, tx,
2433 drop_struct_rwlock, B_FALSE);
2434 ASSERT(dn->dn_maxblkid >= db->db_blkid);
2437 if (db->db_level+1 < dn->dn_nlevels) {
2438 dmu_buf_impl_t *parent = db->db_parent;
2439 dbuf_dirty_record_t *di;
2440 int parent_held = FALSE;
2442 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2443 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2444 parent = dbuf_hold_level(dn, db->db_level + 1,
2445 db->db_blkid >> epbs, FTAG);
2446 ASSERT(parent != NULL);
2447 parent_held = TRUE;
2449 if (drop_struct_rwlock)
2450 rw_exit(&dn->dn_struct_rwlock);
2451 ASSERT3U(db->db_level + 1, ==, parent->db_level);
2452 di = dbuf_dirty(parent, tx);
2453 if (parent_held)
2454 dbuf_rele(parent, FTAG);
2456 mutex_enter(&db->db_mtx);
2458 * Since we've dropped the mutex, it's possible that
2459 * dbuf_undirty() might have changed this out from under us.
2461 if (list_head(&db->db_dirty_records) == dr ||
2462 dn->dn_object == DMU_META_DNODE_OBJECT) {
2463 mutex_enter(&di->dt.di.dr_mtx);
2464 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2465 ASSERT(!list_link_active(&dr->dr_dirty_node));
2466 list_insert_tail(&di->dt.di.dr_children, dr);
2467 mutex_exit(&di->dt.di.dr_mtx);
2468 dr->dr_parent = di;
2470 mutex_exit(&db->db_mtx);
2471 } else {
2472 ASSERT(db->db_level + 1 == dn->dn_nlevels);
2473 ASSERT(db->db_blkid < dn->dn_nblkptr);
2474 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2475 mutex_enter(&dn->dn_mtx);
2476 ASSERT(!list_link_active(&dr->dr_dirty_node));
2477 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2478 mutex_exit(&dn->dn_mtx);
2479 if (drop_struct_rwlock)
2480 rw_exit(&dn->dn_struct_rwlock);
2483 dnode_setdirty(dn, tx);
2484 DB_DNODE_EXIT(db);
2485 return (dr);
2488 static void
2489 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2491 dmu_buf_impl_t *db = dr->dr_dbuf;
2493 if (dr->dt.dl.dr_data != db->db.db_data) {
2494 struct dnode *dn = dr->dr_dnode;
2495 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2497 kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2498 arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2500 db->db_data_pending = NULL;
2501 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2502 list_remove(&db->db_dirty_records, dr);
2503 if (dr->dr_dbuf->db_level != 0) {
2504 mutex_destroy(&dr->dt.di.dr_mtx);
2505 list_destroy(&dr->dt.di.dr_children);
2507 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2508 ASSERT3U(db->db_dirtycnt, >, 0);
2509 db->db_dirtycnt -= 1;
2513 * Undirty a buffer in the transaction group referenced by the given
2514 * transaction. Return whether this evicted the dbuf.
2516 boolean_t
2517 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2519 uint64_t txg = tx->tx_txg;
2520 boolean_t brtwrite;
2522 ASSERT(txg != 0);
2525 * Due to our use of dn_nlevels below, this can only be called
2526 * in open context, unless we are operating on the MOS.
2527 * From syncing context, dn_nlevels may be different from the
2528 * dn_nlevels used when dbuf was dirtied.
2530 ASSERT(db->db_objset ==
2531 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2532 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2533 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2534 ASSERT0(db->db_level);
2535 ASSERT(MUTEX_HELD(&db->db_mtx));
2538 * If this buffer is not dirty, we're done.
2540 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2541 if (dr == NULL)
2542 return (B_FALSE);
2543 ASSERT(dr->dr_dbuf == db);
2545 brtwrite = dr->dt.dl.dr_brtwrite;
2546 if (brtwrite) {
2548 * We are freeing a block that we cloned in the same
2549 * transaction group.
2551 brt_pending_remove(dmu_objset_spa(db->db_objset),
2552 &dr->dt.dl.dr_overridden_by, tx);
2555 dnode_t *dn = dr->dr_dnode;
2557 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2559 ASSERT(db->db.db_size != 0);
2561 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2562 dr->dr_accounted, txg);
2564 list_remove(&db->db_dirty_records, dr);
2567 * Note that there are three places in dbuf_dirty()
2568 * where this dirty record may be put on a list.
2569 * Make sure to do a list_remove corresponding to
2570 * every one of those list_insert calls.
2572 if (dr->dr_parent) {
2573 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2574 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2575 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2576 } else if (db->db_blkid == DMU_SPILL_BLKID ||
2577 db->db_level + 1 == dn->dn_nlevels) {
2578 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2579 mutex_enter(&dn->dn_mtx);
2580 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2581 mutex_exit(&dn->dn_mtx);
2584 if (db->db_state != DB_NOFILL && !brtwrite) {
2585 dbuf_unoverride(dr);
2587 ASSERT(db->db_buf != NULL);
2588 ASSERT(dr->dt.dl.dr_data != NULL);
2589 if (dr->dt.dl.dr_data != db->db_buf)
2590 arc_buf_destroy(dr->dt.dl.dr_data, db);
2593 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2595 ASSERT(db->db_dirtycnt > 0);
2596 db->db_dirtycnt -= 1;
2598 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2599 ASSERT(db->db_state == DB_NOFILL || brtwrite ||
2600 arc_released(db->db_buf));
2601 dbuf_destroy(db);
2602 return (B_TRUE);
2605 return (B_FALSE);
2608 static void
2609 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2611 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2612 boolean_t undirty = B_FALSE;
2614 ASSERT(tx->tx_txg != 0);
2615 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2618 * Quick check for dirtiness. For already dirty blocks, this
2619 * reduces runtime of this function by >90%, and overall performance
2620 * by 50% for some workloads (e.g. file deletion with indirect blocks
2621 * cached).
2623 mutex_enter(&db->db_mtx);
2625 if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
2626 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2628 * It's possible that it is already dirty but not cached,
2629 * because there are some calls to dbuf_dirty() that don't
2630 * go through dmu_buf_will_dirty().
2632 if (dr != NULL) {
2633 if (dr->dt.dl.dr_brtwrite) {
2635 * Block cloning: If we are dirtying a cloned
2636 * block, we cannot simply redirty it, because
2637 * this dr has no data associated with it.
2638 * We will go through a full undirtying below,
2639 * before dirtying it again.
2641 undirty = B_TRUE;
2642 } else {
2643 /* This dbuf is already dirty and cached. */
2644 dbuf_redirty(dr);
2645 mutex_exit(&db->db_mtx);
2646 return;
2650 mutex_exit(&db->db_mtx);
2652 DB_DNODE_ENTER(db);
2653 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2654 flags |= DB_RF_HAVESTRUCT;
2655 DB_DNODE_EXIT(db);
2658 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
2659 * want to make sure dbuf_read() will read the pending cloned block and
2660 * not the uderlying block that is being replaced. dbuf_undirty() will
2661 * do dbuf_unoverride(), so we will end up with cloned block content,
2662 * without overridden BP.
2664 (void) dbuf_read(db, NULL, flags);
2665 if (undirty) {
2666 mutex_enter(&db->db_mtx);
2667 VERIFY(!dbuf_undirty(db, tx));
2668 mutex_exit(&db->db_mtx);
2670 (void) dbuf_dirty(db, tx);
2673 void
2674 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2676 dmu_buf_will_dirty_impl(db_fake,
2677 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2680 boolean_t
2681 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2683 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2684 dbuf_dirty_record_t *dr;
2686 mutex_enter(&db->db_mtx);
2687 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2688 mutex_exit(&db->db_mtx);
2689 return (dr != NULL);
2692 void
2693 dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx)
2695 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2698 * Block cloning: We are going to clone into this block, so undirty
2699 * modifications done to this block so far in this txg. This includes
2700 * writes and clones into this block.
2702 mutex_enter(&db->db_mtx);
2703 DBUF_VERIFY(db);
2704 VERIFY(!dbuf_undirty(db, tx));
2705 ASSERT3P(dbuf_find_dirty_eq(db, tx->tx_txg), ==, NULL);
2706 if (db->db_buf != NULL) {
2707 arc_buf_destroy(db->db_buf, db);
2708 db->db_buf = NULL;
2709 dbuf_clear_data(db);
2712 db->db_state = DB_NOFILL;
2713 DTRACE_SET_STATE(db, "allocating NOFILL buffer for clone");
2715 DBUF_VERIFY(db);
2716 mutex_exit(&db->db_mtx);
2718 dbuf_noread(db);
2719 (void) dbuf_dirty(db, tx);
2722 void
2723 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2725 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2727 mutex_enter(&db->db_mtx);
2728 db->db_state = DB_NOFILL;
2729 DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2730 mutex_exit(&db->db_mtx);
2732 dbuf_noread(db);
2733 (void) dbuf_dirty(db, tx);
2736 void
2737 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2739 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2741 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2742 ASSERT(tx->tx_txg != 0);
2743 ASSERT(db->db_level == 0);
2744 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2746 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2747 dmu_tx_private_ok(tx));
2749 mutex_enter(&db->db_mtx);
2750 if (db->db_state == DB_NOFILL) {
2752 * Block cloning: We will be completely overwriting a block
2753 * cloned in this transaction group, so let's undirty the
2754 * pending clone and mark the block as uncached. This will be
2755 * as if the clone was never done.
2757 VERIFY(!dbuf_undirty(db, tx));
2758 db->db_state = DB_UNCACHED;
2760 mutex_exit(&db->db_mtx);
2762 dbuf_noread(db);
2763 (void) dbuf_dirty(db, tx);
2767 * This function is effectively the same as dmu_buf_will_dirty(), but
2768 * indicates the caller expects raw encrypted data in the db, and provides
2769 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2770 * blkptr_t when this dbuf is written. This is only used for blocks of
2771 * dnodes, during raw receive.
2773 void
2774 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2775 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2777 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2778 dbuf_dirty_record_t *dr;
2781 * dr_has_raw_params is only processed for blocks of dnodes
2782 * (see dbuf_sync_dnode_leaf_crypt()).
2784 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2785 ASSERT3U(db->db_level, ==, 0);
2786 ASSERT(db->db_objset->os_raw_receive);
2788 dmu_buf_will_dirty_impl(db_fake,
2789 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2791 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2793 ASSERT3P(dr, !=, NULL);
2795 dr->dt.dl.dr_has_raw_params = B_TRUE;
2796 dr->dt.dl.dr_byteorder = byteorder;
2797 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2798 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2799 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2802 static void
2803 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2805 struct dirty_leaf *dl;
2806 dbuf_dirty_record_t *dr;
2808 dr = list_head(&db->db_dirty_records);
2809 ASSERT3P(dr, !=, NULL);
2810 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2811 dl = &dr->dt.dl;
2812 dl->dr_overridden_by = *bp;
2813 dl->dr_override_state = DR_OVERRIDDEN;
2814 dl->dr_overridden_by.blk_birth = dr->dr_txg;
2817 void
2818 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2820 (void) tx;
2821 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2822 dbuf_states_t old_state;
2823 mutex_enter(&db->db_mtx);
2824 DBUF_VERIFY(db);
2826 old_state = db->db_state;
2827 db->db_state = DB_CACHED;
2828 if (old_state == DB_FILL) {
2829 if (db->db_level == 0 && db->db_freed_in_flight) {
2830 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2831 /* we were freed while filling */
2832 /* XXX dbuf_undirty? */
2833 memset(db->db.db_data, 0, db->db.db_size);
2834 db->db_freed_in_flight = FALSE;
2835 DTRACE_SET_STATE(db,
2836 "fill done handling freed in flight");
2837 } else {
2838 DTRACE_SET_STATE(db, "fill done");
2840 cv_broadcast(&db->db_changed);
2842 mutex_exit(&db->db_mtx);
2845 void
2846 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2847 bp_embedded_type_t etype, enum zio_compress comp,
2848 int uncompressed_size, int compressed_size, int byteorder,
2849 dmu_tx_t *tx)
2851 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2852 struct dirty_leaf *dl;
2853 dmu_object_type_t type;
2854 dbuf_dirty_record_t *dr;
2856 if (etype == BP_EMBEDDED_TYPE_DATA) {
2857 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2858 SPA_FEATURE_EMBEDDED_DATA));
2861 DB_DNODE_ENTER(db);
2862 type = DB_DNODE(db)->dn_type;
2863 DB_DNODE_EXIT(db);
2865 ASSERT0(db->db_level);
2866 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2868 dmu_buf_will_not_fill(dbuf, tx);
2870 dr = list_head(&db->db_dirty_records);
2871 ASSERT3P(dr, !=, NULL);
2872 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2873 dl = &dr->dt.dl;
2874 encode_embedded_bp_compressed(&dl->dr_overridden_by,
2875 data, comp, uncompressed_size, compressed_size);
2876 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2877 BP_SET_TYPE(&dl->dr_overridden_by, type);
2878 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2879 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2881 dl->dr_override_state = DR_OVERRIDDEN;
2882 dl->dr_overridden_by.blk_birth = dr->dr_txg;
2885 void
2886 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2888 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2889 dmu_object_type_t type;
2890 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2891 SPA_FEATURE_REDACTED_DATASETS));
2893 DB_DNODE_ENTER(db);
2894 type = DB_DNODE(db)->dn_type;
2895 DB_DNODE_EXIT(db);
2897 ASSERT0(db->db_level);
2898 dmu_buf_will_not_fill(dbuf, tx);
2900 blkptr_t bp = { { { {0} } } };
2901 BP_SET_TYPE(&bp, type);
2902 BP_SET_LEVEL(&bp, 0);
2903 BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2904 BP_SET_REDACTED(&bp);
2905 BPE_SET_LSIZE(&bp, dbuf->db_size);
2907 dbuf_override_impl(db, &bp, tx);
2911 * Directly assign a provided arc buf to a given dbuf if it's not referenced
2912 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2914 void
2915 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2917 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2918 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2919 ASSERT(db->db_level == 0);
2920 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2921 ASSERT(buf != NULL);
2922 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2923 ASSERT(tx->tx_txg != 0);
2925 arc_return_buf(buf, db);
2926 ASSERT(arc_released(buf));
2928 mutex_enter(&db->db_mtx);
2930 while (db->db_state == DB_READ || db->db_state == DB_FILL)
2931 cv_wait(&db->db_changed, &db->db_mtx);
2933 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2935 if (db->db_state == DB_CACHED &&
2936 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2938 * In practice, we will never have a case where we have an
2939 * encrypted arc buffer while additional holds exist on the
2940 * dbuf. We don't handle this here so we simply assert that
2941 * fact instead.
2943 ASSERT(!arc_is_encrypted(buf));
2944 mutex_exit(&db->db_mtx);
2945 (void) dbuf_dirty(db, tx);
2946 memcpy(db->db.db_data, buf->b_data, db->db.db_size);
2947 arc_buf_destroy(buf, db);
2948 return;
2951 if (db->db_state == DB_CACHED) {
2952 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2954 ASSERT(db->db_buf != NULL);
2955 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2956 ASSERT(dr->dt.dl.dr_data == db->db_buf);
2958 if (!arc_released(db->db_buf)) {
2959 ASSERT(dr->dt.dl.dr_override_state ==
2960 DR_OVERRIDDEN);
2961 arc_release(db->db_buf, db);
2963 dr->dt.dl.dr_data = buf;
2964 arc_buf_destroy(db->db_buf, db);
2965 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2966 arc_release(db->db_buf, db);
2967 arc_buf_destroy(db->db_buf, db);
2969 db->db_buf = NULL;
2971 ASSERT(db->db_buf == NULL);
2972 dbuf_set_data(db, buf);
2973 db->db_state = DB_FILL;
2974 DTRACE_SET_STATE(db, "filling assigned arcbuf");
2975 mutex_exit(&db->db_mtx);
2976 (void) dbuf_dirty(db, tx);
2977 dmu_buf_fill_done(&db->db, tx);
2980 void
2981 dbuf_destroy(dmu_buf_impl_t *db)
2983 dnode_t *dn;
2984 dmu_buf_impl_t *parent = db->db_parent;
2985 dmu_buf_impl_t *dndb;
2987 ASSERT(MUTEX_HELD(&db->db_mtx));
2988 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2990 if (db->db_buf != NULL) {
2991 arc_buf_destroy(db->db_buf, db);
2992 db->db_buf = NULL;
2995 if (db->db_blkid == DMU_BONUS_BLKID) {
2996 int slots = DB_DNODE(db)->dn_num_slots;
2997 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2998 if (db->db.db_data != NULL) {
2999 kmem_free(db->db.db_data, bonuslen);
3000 arc_space_return(bonuslen, ARC_SPACE_BONUS);
3001 db->db_state = DB_UNCACHED;
3002 DTRACE_SET_STATE(db, "buffer cleared");
3006 dbuf_clear_data(db);
3008 if (multilist_link_active(&db->db_cache_link)) {
3009 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3010 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3012 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3013 (void) zfs_refcount_remove_many(
3014 &dbuf_caches[db->db_caching_status].size,
3015 db->db.db_size, db);
3017 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3018 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3019 } else {
3020 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3021 DBUF_STAT_BUMPDOWN(cache_count);
3022 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3023 db->db.db_size);
3025 db->db_caching_status = DB_NO_CACHE;
3028 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
3029 ASSERT(db->db_data_pending == NULL);
3030 ASSERT(list_is_empty(&db->db_dirty_records));
3032 db->db_state = DB_EVICTING;
3033 DTRACE_SET_STATE(db, "buffer eviction started");
3034 db->db_blkptr = NULL;
3037 * Now that db_state is DB_EVICTING, nobody else can find this via
3038 * the hash table. We can now drop db_mtx, which allows us to
3039 * acquire the dn_dbufs_mtx.
3041 mutex_exit(&db->db_mtx);
3043 DB_DNODE_ENTER(db);
3044 dn = DB_DNODE(db);
3045 dndb = dn->dn_dbuf;
3046 if (db->db_blkid != DMU_BONUS_BLKID) {
3047 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
3048 if (needlock)
3049 mutex_enter_nested(&dn->dn_dbufs_mtx,
3050 NESTED_SINGLE);
3051 avl_remove(&dn->dn_dbufs, db);
3052 membar_producer();
3053 DB_DNODE_EXIT(db);
3054 if (needlock)
3055 mutex_exit(&dn->dn_dbufs_mtx);
3057 * Decrementing the dbuf count means that the hold corresponding
3058 * to the removed dbuf is no longer discounted in dnode_move(),
3059 * so the dnode cannot be moved until after we release the hold.
3060 * The membar_producer() ensures visibility of the decremented
3061 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
3062 * release any lock.
3064 mutex_enter(&dn->dn_mtx);
3065 dnode_rele_and_unlock(dn, db, B_TRUE);
3066 db->db_dnode_handle = NULL;
3068 dbuf_hash_remove(db);
3069 } else {
3070 DB_DNODE_EXIT(db);
3073 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3075 db->db_parent = NULL;
3077 ASSERT(db->db_buf == NULL);
3078 ASSERT(db->db.db_data == NULL);
3079 ASSERT(db->db_hash_next == NULL);
3080 ASSERT(db->db_blkptr == NULL);
3081 ASSERT(db->db_data_pending == NULL);
3082 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3083 ASSERT(!multilist_link_active(&db->db_cache_link));
3086 * If this dbuf is referenced from an indirect dbuf,
3087 * decrement the ref count on the indirect dbuf.
3089 if (parent && parent != dndb) {
3090 mutex_enter(&parent->db_mtx);
3091 dbuf_rele_and_unlock(parent, db, B_TRUE);
3094 kmem_cache_free(dbuf_kmem_cache, db);
3095 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3099 * Note: While bpp will always be updated if the function returns success,
3100 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3101 * this happens when the dnode is the meta-dnode, or {user|group|project}used
3102 * object.
3104 __attribute__((always_inline))
3105 static inline int
3106 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3107 dmu_buf_impl_t **parentp, blkptr_t **bpp)
3109 *parentp = NULL;
3110 *bpp = NULL;
3112 ASSERT(blkid != DMU_BONUS_BLKID);
3114 if (blkid == DMU_SPILL_BLKID) {
3115 mutex_enter(&dn->dn_mtx);
3116 if (dn->dn_have_spill &&
3117 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3118 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3119 else
3120 *bpp = NULL;
3121 dbuf_add_ref(dn->dn_dbuf, NULL);
3122 *parentp = dn->dn_dbuf;
3123 mutex_exit(&dn->dn_mtx);
3124 return (0);
3127 int nlevels =
3128 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3129 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3131 ASSERT3U(level * epbs, <, 64);
3132 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3134 * This assertion shouldn't trip as long as the max indirect block size
3135 * is less than 1M. The reason for this is that up to that point,
3136 * the number of levels required to address an entire object with blocks
3137 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
3138 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3139 * (i.e. we can address the entire object), objects will all use at most
3140 * N-1 levels and the assertion won't overflow. However, once epbs is
3141 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
3142 * enough to address an entire object, so objects will have 5 levels,
3143 * but then this assertion will overflow.
3145 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3146 * need to redo this logic to handle overflows.
3148 ASSERT(level >= nlevels ||
3149 ((nlevels - level - 1) * epbs) +
3150 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3151 if (level >= nlevels ||
3152 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3153 ((nlevels - level - 1) * epbs)) ||
3154 (fail_sparse &&
3155 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3156 /* the buffer has no parent yet */
3157 return (SET_ERROR(ENOENT));
3158 } else if (level < nlevels-1) {
3159 /* this block is referenced from an indirect block */
3160 int err;
3162 err = dbuf_hold_impl(dn, level + 1,
3163 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3165 if (err)
3166 return (err);
3167 err = dbuf_read(*parentp, NULL,
3168 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3169 if (err) {
3170 dbuf_rele(*parentp, NULL);
3171 *parentp = NULL;
3172 return (err);
3174 rw_enter(&(*parentp)->db_rwlock, RW_READER);
3175 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3176 (blkid & ((1ULL << epbs) - 1));
3177 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3178 ASSERT(BP_IS_HOLE(*bpp));
3179 rw_exit(&(*parentp)->db_rwlock);
3180 return (0);
3181 } else {
3182 /* the block is referenced from the dnode */
3183 ASSERT3U(level, ==, nlevels-1);
3184 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3185 blkid < dn->dn_phys->dn_nblkptr);
3186 if (dn->dn_dbuf) {
3187 dbuf_add_ref(dn->dn_dbuf, NULL);
3188 *parentp = dn->dn_dbuf;
3190 *bpp = &dn->dn_phys->dn_blkptr[blkid];
3191 return (0);
3195 static dmu_buf_impl_t *
3196 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3197 dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3199 objset_t *os = dn->dn_objset;
3200 dmu_buf_impl_t *db, *odb;
3202 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3203 ASSERT(dn->dn_type != DMU_OT_NONE);
3205 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3207 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3208 offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3210 db->db_objset = os;
3211 db->db.db_object = dn->dn_object;
3212 db->db_level = level;
3213 db->db_blkid = blkid;
3214 db->db_dirtycnt = 0;
3215 db->db_dnode_handle = dn->dn_handle;
3216 db->db_parent = parent;
3217 db->db_blkptr = blkptr;
3218 db->db_hash = hash;
3220 db->db_user = NULL;
3221 db->db_user_immediate_evict = FALSE;
3222 db->db_freed_in_flight = FALSE;
3223 db->db_pending_evict = FALSE;
3225 if (blkid == DMU_BONUS_BLKID) {
3226 ASSERT3P(parent, ==, dn->dn_dbuf);
3227 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3228 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3229 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3230 db->db.db_offset = DMU_BONUS_BLKID;
3231 db->db_state = DB_UNCACHED;
3232 DTRACE_SET_STATE(db, "bonus buffer created");
3233 db->db_caching_status = DB_NO_CACHE;
3234 /* the bonus dbuf is not placed in the hash table */
3235 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3236 return (db);
3237 } else if (blkid == DMU_SPILL_BLKID) {
3238 db->db.db_size = (blkptr != NULL) ?
3239 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3240 db->db.db_offset = 0;
3241 } else {
3242 int blocksize =
3243 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3244 db->db.db_size = blocksize;
3245 db->db.db_offset = db->db_blkid * blocksize;
3249 * Hold the dn_dbufs_mtx while we get the new dbuf
3250 * in the hash table *and* added to the dbufs list.
3251 * This prevents a possible deadlock with someone
3252 * trying to look up this dbuf before it's added to the
3253 * dn_dbufs list.
3255 mutex_enter(&dn->dn_dbufs_mtx);
3256 db->db_state = DB_EVICTING; /* not worth logging this state change */
3257 if ((odb = dbuf_hash_insert(db)) != NULL) {
3258 /* someone else inserted it first */
3259 mutex_exit(&dn->dn_dbufs_mtx);
3260 kmem_cache_free(dbuf_kmem_cache, db);
3261 DBUF_STAT_BUMP(hash_insert_race);
3262 return (odb);
3264 avl_add(&dn->dn_dbufs, db);
3266 db->db_state = DB_UNCACHED;
3267 DTRACE_SET_STATE(db, "regular buffer created");
3268 db->db_caching_status = DB_NO_CACHE;
3269 mutex_exit(&dn->dn_dbufs_mtx);
3270 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3272 if (parent && parent != dn->dn_dbuf)
3273 dbuf_add_ref(parent, db);
3275 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3276 zfs_refcount_count(&dn->dn_holds) > 0);
3277 (void) zfs_refcount_add(&dn->dn_holds, db);
3279 dprintf_dbuf(db, "db=%p\n", db);
3281 return (db);
3285 * This function returns a block pointer and information about the object,
3286 * given a dnode and a block. This is a publicly accessible version of
3287 * dbuf_findbp that only returns some information, rather than the
3288 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
3289 * should be locked as (at least) a reader.
3292 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3293 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3295 dmu_buf_impl_t *dbp = NULL;
3296 blkptr_t *bp2;
3297 int err = 0;
3298 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3300 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3301 if (err == 0) {
3302 ASSERT3P(bp2, !=, NULL);
3303 *bp = *bp2;
3304 if (dbp != NULL)
3305 dbuf_rele(dbp, NULL);
3306 if (datablkszsec != NULL)
3307 *datablkszsec = dn->dn_phys->dn_datablkszsec;
3308 if (indblkshift != NULL)
3309 *indblkshift = dn->dn_phys->dn_indblkshift;
3312 return (err);
3315 typedef struct dbuf_prefetch_arg {
3316 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
3317 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3318 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3319 int dpa_curlevel; /* The current level that we're reading */
3320 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3321 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3322 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3323 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3324 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3325 void *dpa_arg; /* prefetch completion arg */
3326 } dbuf_prefetch_arg_t;
3328 static void
3329 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3331 if (dpa->dpa_cb != NULL) {
3332 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3333 dpa->dpa_zb.zb_blkid, io_done);
3335 kmem_free(dpa, sizeof (*dpa));
3338 static void
3339 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3340 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3342 (void) zio, (void) zb, (void) iobp;
3343 dbuf_prefetch_arg_t *dpa = private;
3345 if (abuf != NULL)
3346 arc_buf_destroy(abuf, private);
3348 dbuf_prefetch_fini(dpa, B_TRUE);
3352 * Actually issue the prefetch read for the block given.
3354 static void
3355 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3357 ASSERT(!BP_IS_REDACTED(bp) ||
3358 dsl_dataset_feature_is_active(
3359 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3360 SPA_FEATURE_REDACTED_DATASETS));
3362 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3363 return (dbuf_prefetch_fini(dpa, B_FALSE));
3365 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3366 arc_flags_t aflags =
3367 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3368 ARC_FLAG_NO_BUF;
3370 /* dnodes are always read as raw and then converted later */
3371 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3372 dpa->dpa_curlevel == 0)
3373 zio_flags |= ZIO_FLAG_RAW;
3375 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3376 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3377 ASSERT(dpa->dpa_zio != NULL);
3378 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3379 dbuf_issue_final_prefetch_done, dpa,
3380 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3384 * Called when an indirect block above our prefetch target is read in. This
3385 * will either read in the next indirect block down the tree or issue the actual
3386 * prefetch if the next block down is our target.
3388 static void
3389 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3390 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3392 (void) zb, (void) iobp;
3393 dbuf_prefetch_arg_t *dpa = private;
3395 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3396 ASSERT3S(dpa->dpa_curlevel, >, 0);
3398 if (abuf == NULL) {
3399 ASSERT(zio == NULL || zio->io_error != 0);
3400 dbuf_prefetch_fini(dpa, B_TRUE);
3401 return;
3403 ASSERT(zio == NULL || zio->io_error == 0);
3406 * The dpa_dnode is only valid if we are called with a NULL
3407 * zio. This indicates that the arc_read() returned without
3408 * first calling zio_read() to issue a physical read. Once
3409 * a physical read is made the dpa_dnode must be invalidated
3410 * as the locks guarding it may have been dropped. If the
3411 * dpa_dnode is still valid, then we want to add it to the dbuf
3412 * cache. To do so, we must hold the dbuf associated with the block
3413 * we just prefetched, read its contents so that we associate it
3414 * with an arc_buf_t, and then release it.
3416 if (zio != NULL) {
3417 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3418 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3419 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3420 } else {
3421 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3423 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3425 dpa->dpa_dnode = NULL;
3426 } else if (dpa->dpa_dnode != NULL) {
3427 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3428 (dpa->dpa_epbs * (dpa->dpa_curlevel -
3429 dpa->dpa_zb.zb_level));
3430 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3431 dpa->dpa_curlevel, curblkid, FTAG);
3432 if (db == NULL) {
3433 arc_buf_destroy(abuf, private);
3434 dbuf_prefetch_fini(dpa, B_TRUE);
3435 return;
3437 (void) dbuf_read(db, NULL,
3438 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3439 dbuf_rele(db, FTAG);
3442 dpa->dpa_curlevel--;
3443 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3444 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3445 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3446 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3448 ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
3449 dsl_dataset_feature_is_active(
3450 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3451 SPA_FEATURE_REDACTED_DATASETS)));
3452 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3453 arc_buf_destroy(abuf, private);
3454 dbuf_prefetch_fini(dpa, B_TRUE);
3455 return;
3456 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3457 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3458 dbuf_issue_final_prefetch(dpa, bp);
3459 } else {
3460 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3461 zbookmark_phys_t zb;
3463 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3464 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3465 iter_aflags |= ARC_FLAG_L2CACHE;
3467 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3469 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3470 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3472 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3473 bp, dbuf_prefetch_indirect_done, dpa,
3474 ZIO_PRIORITY_SYNC_READ,
3475 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3476 &iter_aflags, &zb);
3479 arc_buf_destroy(abuf, private);
3483 * Issue prefetch reads for the given block on the given level. If the indirect
3484 * blocks above that block are not in memory, we will read them in
3485 * asynchronously. As a result, this call never blocks waiting for a read to
3486 * complete. Note that the prefetch might fail if the dataset is encrypted and
3487 * the encryption key is unmapped before the IO completes.
3490 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3491 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3492 void *arg)
3494 blkptr_t bp;
3495 int epbs, nlevels, curlevel;
3496 uint64_t curblkid;
3498 ASSERT(blkid != DMU_BONUS_BLKID);
3499 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3501 if (blkid > dn->dn_maxblkid)
3502 goto no_issue;
3504 if (level == 0 && dnode_block_freed(dn, blkid))
3505 goto no_issue;
3508 * This dnode hasn't been written to disk yet, so there's nothing to
3509 * prefetch.
3511 nlevels = dn->dn_phys->dn_nlevels;
3512 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3513 goto no_issue;
3515 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3516 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3517 goto no_issue;
3519 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3520 level, blkid, NULL);
3521 if (db != NULL) {
3522 mutex_exit(&db->db_mtx);
3524 * This dbuf already exists. It is either CACHED, or
3525 * (we assume) about to be read or filled.
3527 goto no_issue;
3531 * Find the closest ancestor (indirect block) of the target block
3532 * that is present in the cache. In this indirect block, we will
3533 * find the bp that is at curlevel, curblkid.
3535 curlevel = level;
3536 curblkid = blkid;
3537 while (curlevel < nlevels - 1) {
3538 int parent_level = curlevel + 1;
3539 uint64_t parent_blkid = curblkid >> epbs;
3540 dmu_buf_impl_t *db;
3542 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3543 FALSE, TRUE, FTAG, &db) == 0) {
3544 blkptr_t *bpp = db->db_buf->b_data;
3545 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3546 dbuf_rele(db, FTAG);
3547 break;
3550 curlevel = parent_level;
3551 curblkid = parent_blkid;
3554 if (curlevel == nlevels - 1) {
3555 /* No cached indirect blocks found. */
3556 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3557 bp = dn->dn_phys->dn_blkptr[curblkid];
3559 ASSERT(!BP_IS_REDACTED(&bp) ||
3560 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3561 SPA_FEATURE_REDACTED_DATASETS));
3562 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3563 goto no_issue;
3565 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3567 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3568 ZIO_FLAG_CANFAIL);
3570 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3571 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3572 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3573 dn->dn_object, level, blkid);
3574 dpa->dpa_curlevel = curlevel;
3575 dpa->dpa_prio = prio;
3576 dpa->dpa_aflags = aflags;
3577 dpa->dpa_spa = dn->dn_objset->os_spa;
3578 dpa->dpa_dnode = dn;
3579 dpa->dpa_epbs = epbs;
3580 dpa->dpa_zio = pio;
3581 dpa->dpa_cb = cb;
3582 dpa->dpa_arg = arg;
3584 if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3585 dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3586 else if (dnode_level_is_l2cacheable(&bp, dn, level))
3587 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3590 * If we have the indirect just above us, no need to do the asynchronous
3591 * prefetch chain; we'll just run the last step ourselves. If we're at
3592 * a higher level, though, we want to issue the prefetches for all the
3593 * indirect blocks asynchronously, so we can go on with whatever we were
3594 * doing.
3596 if (curlevel == level) {
3597 ASSERT3U(curblkid, ==, blkid);
3598 dbuf_issue_final_prefetch(dpa, &bp);
3599 } else {
3600 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3601 zbookmark_phys_t zb;
3603 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3604 if (dnode_level_is_l2cacheable(&bp, dn, level))
3605 iter_aflags |= ARC_FLAG_L2CACHE;
3607 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3608 dn->dn_object, curlevel, curblkid);
3609 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3610 &bp, dbuf_prefetch_indirect_done, dpa,
3611 ZIO_PRIORITY_SYNC_READ,
3612 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3613 &iter_aflags, &zb);
3616 * We use pio here instead of dpa_zio since it's possible that
3617 * dpa may have already been freed.
3619 zio_nowait(pio);
3620 return (1);
3621 no_issue:
3622 if (cb != NULL)
3623 cb(arg, level, blkid, B_FALSE);
3624 return (0);
3628 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3629 arc_flags_t aflags)
3632 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3636 * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3637 * the case of encrypted, compressed and uncompressed buffers by
3638 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3639 * arc_alloc_compressed_buf() or arc_alloc_buf().*
3641 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3643 noinline static void
3644 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3646 dbuf_dirty_record_t *dr = db->db_data_pending;
3647 arc_buf_t *data = dr->dt.dl.dr_data;
3648 enum zio_compress compress_type = arc_get_compression(data);
3649 uint8_t complevel = arc_get_complevel(data);
3651 if (arc_is_encrypted(data)) {
3652 boolean_t byteorder;
3653 uint8_t salt[ZIO_DATA_SALT_LEN];
3654 uint8_t iv[ZIO_DATA_IV_LEN];
3655 uint8_t mac[ZIO_DATA_MAC_LEN];
3657 arc_get_raw_params(data, &byteorder, salt, iv, mac);
3658 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3659 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3660 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3661 compress_type, complevel));
3662 } else if (compress_type != ZIO_COMPRESS_OFF) {
3663 dbuf_set_data(db, arc_alloc_compressed_buf(
3664 dn->dn_objset->os_spa, db, arc_buf_size(data),
3665 arc_buf_lsize(data), compress_type, complevel));
3666 } else {
3667 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3668 DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3671 rw_enter(&db->db_rwlock, RW_WRITER);
3672 memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3673 rw_exit(&db->db_rwlock);
3677 * Returns with db_holds incremented, and db_mtx not held.
3678 * Note: dn_struct_rwlock must be held.
3681 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3682 boolean_t fail_sparse, boolean_t fail_uncached,
3683 const void *tag, dmu_buf_impl_t **dbp)
3685 dmu_buf_impl_t *db, *parent = NULL;
3686 uint64_t hv;
3688 /* If the pool has been created, verify the tx_sync_lock is not held */
3689 spa_t *spa = dn->dn_objset->os_spa;
3690 dsl_pool_t *dp = spa->spa_dsl_pool;
3691 if (dp != NULL) {
3692 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3695 ASSERT(blkid != DMU_BONUS_BLKID);
3696 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3697 ASSERT3U(dn->dn_nlevels, >, level);
3699 *dbp = NULL;
3701 /* dbuf_find() returns with db_mtx held */
3702 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3704 if (db == NULL) {
3705 blkptr_t *bp = NULL;
3706 int err;
3708 if (fail_uncached)
3709 return (SET_ERROR(ENOENT));
3711 ASSERT3P(parent, ==, NULL);
3712 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3713 if (fail_sparse) {
3714 if (err == 0 && bp && BP_IS_HOLE(bp))
3715 err = SET_ERROR(ENOENT);
3716 if (err) {
3717 if (parent)
3718 dbuf_rele(parent, NULL);
3719 return (err);
3722 if (err && err != ENOENT)
3723 return (err);
3724 db = dbuf_create(dn, level, blkid, parent, bp, hv);
3727 if (fail_uncached && db->db_state != DB_CACHED) {
3728 mutex_exit(&db->db_mtx);
3729 return (SET_ERROR(ENOENT));
3732 if (db->db_buf != NULL) {
3733 arc_buf_access(db->db_buf);
3734 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3737 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3740 * If this buffer is currently syncing out, and we are
3741 * still referencing it from db_data, we need to make a copy
3742 * of it in case we decide we want to dirty it again in this txg.
3744 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3745 dn->dn_object != DMU_META_DNODE_OBJECT &&
3746 db->db_state == DB_CACHED && db->db_data_pending) {
3747 dbuf_dirty_record_t *dr = db->db_data_pending;
3748 if (dr->dt.dl.dr_data == db->db_buf) {
3749 ASSERT3P(db->db_buf, !=, NULL);
3750 dbuf_hold_copy(dn, db);
3754 if (multilist_link_active(&db->db_cache_link)) {
3755 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3756 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3757 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3759 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3760 (void) zfs_refcount_remove_many(
3761 &dbuf_caches[db->db_caching_status].size,
3762 db->db.db_size, db);
3764 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3765 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3766 } else {
3767 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3768 DBUF_STAT_BUMPDOWN(cache_count);
3769 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3770 db->db.db_size);
3772 db->db_caching_status = DB_NO_CACHE;
3774 (void) zfs_refcount_add(&db->db_holds, tag);
3775 DBUF_VERIFY(db);
3776 mutex_exit(&db->db_mtx);
3778 /* NOTE: we can't rele the parent until after we drop the db_mtx */
3779 if (parent)
3780 dbuf_rele(parent, NULL);
3782 ASSERT3P(DB_DNODE(db), ==, dn);
3783 ASSERT3U(db->db_blkid, ==, blkid);
3784 ASSERT3U(db->db_level, ==, level);
3785 *dbp = db;
3787 return (0);
3790 dmu_buf_impl_t *
3791 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3793 return (dbuf_hold_level(dn, 0, blkid, tag));
3796 dmu_buf_impl_t *
3797 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3799 dmu_buf_impl_t *db;
3800 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3801 return (err ? NULL : db);
3804 void
3805 dbuf_create_bonus(dnode_t *dn)
3807 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3809 ASSERT(dn->dn_bonus == NULL);
3810 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
3811 dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
3815 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3817 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3819 if (db->db_blkid != DMU_SPILL_BLKID)
3820 return (SET_ERROR(ENOTSUP));
3821 if (blksz == 0)
3822 blksz = SPA_MINBLOCKSIZE;
3823 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3824 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3826 dbuf_new_size(db, blksz, tx);
3828 return (0);
3831 void
3832 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3834 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3837 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3838 void
3839 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
3841 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3842 VERIFY3S(holds, >, 1);
3845 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3846 boolean_t
3847 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3848 const void *tag)
3850 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3851 dmu_buf_impl_t *found_db;
3852 boolean_t result = B_FALSE;
3854 if (blkid == DMU_BONUS_BLKID)
3855 found_db = dbuf_find_bonus(os, obj);
3856 else
3857 found_db = dbuf_find(os, obj, 0, blkid, NULL);
3859 if (found_db != NULL) {
3860 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3861 (void) zfs_refcount_add(&db->db_holds, tag);
3862 result = B_TRUE;
3864 mutex_exit(&found_db->db_mtx);
3866 return (result);
3870 * If you call dbuf_rele() you had better not be referencing the dnode handle
3871 * unless you have some other direct or indirect hold on the dnode. (An indirect
3872 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3873 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3874 * dnode's parent dbuf evicting its dnode handles.
3876 void
3877 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
3879 mutex_enter(&db->db_mtx);
3880 dbuf_rele_and_unlock(db, tag, B_FALSE);
3883 void
3884 dmu_buf_rele(dmu_buf_t *db, const void *tag)
3886 dbuf_rele((dmu_buf_impl_t *)db, tag);
3890 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
3891 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
3892 * argument should be set if we are already in the dbuf-evicting code
3893 * path, in which case we don't want to recursively evict. This allows us to
3894 * avoid deeply nested stacks that would have a call flow similar to this:
3896 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3897 * ^ |
3898 * | |
3899 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+
3902 void
3903 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
3905 int64_t holds;
3906 uint64_t size;
3908 ASSERT(MUTEX_HELD(&db->db_mtx));
3909 DBUF_VERIFY(db);
3912 * Remove the reference to the dbuf before removing its hold on the
3913 * dnode so we can guarantee in dnode_move() that a referenced bonus
3914 * buffer has a corresponding dnode hold.
3916 holds = zfs_refcount_remove(&db->db_holds, tag);
3917 ASSERT(holds >= 0);
3920 * We can't freeze indirects if there is a possibility that they
3921 * may be modified in the current syncing context.
3923 if (db->db_buf != NULL &&
3924 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3925 arc_buf_freeze(db->db_buf);
3928 if (holds == db->db_dirtycnt &&
3929 db->db_level == 0 && db->db_user_immediate_evict)
3930 dbuf_evict_user(db);
3932 if (holds == 0) {
3933 if (db->db_blkid == DMU_BONUS_BLKID) {
3934 dnode_t *dn;
3935 boolean_t evict_dbuf = db->db_pending_evict;
3938 * If the dnode moves here, we cannot cross this
3939 * barrier until the move completes.
3941 DB_DNODE_ENTER(db);
3943 dn = DB_DNODE(db);
3944 atomic_dec_32(&dn->dn_dbufs_count);
3947 * Decrementing the dbuf count means that the bonus
3948 * buffer's dnode hold is no longer discounted in
3949 * dnode_move(). The dnode cannot move until after
3950 * the dnode_rele() below.
3952 DB_DNODE_EXIT(db);
3955 * Do not reference db after its lock is dropped.
3956 * Another thread may evict it.
3958 mutex_exit(&db->db_mtx);
3960 if (evict_dbuf)
3961 dnode_evict_bonus(dn);
3963 dnode_rele(dn, db);
3964 } else if (db->db_buf == NULL) {
3966 * This is a special case: we never associated this
3967 * dbuf with any data allocated from the ARC.
3969 ASSERT(db->db_state == DB_UNCACHED ||
3970 db->db_state == DB_NOFILL);
3971 dbuf_destroy(db);
3972 } else if (arc_released(db->db_buf)) {
3974 * This dbuf has anonymous data associated with it.
3976 dbuf_destroy(db);
3977 } else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
3978 db->db_pending_evict) {
3979 dbuf_destroy(db);
3980 } else if (!multilist_link_active(&db->db_cache_link)) {
3981 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3983 dbuf_cached_state_t dcs =
3984 dbuf_include_in_metadata_cache(db) ?
3985 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3986 db->db_caching_status = dcs;
3988 multilist_insert(&dbuf_caches[dcs].cache, db);
3989 uint64_t db_size = db->db.db_size;
3990 size = zfs_refcount_add_many(
3991 &dbuf_caches[dcs].size, db_size, db);
3992 uint8_t db_level = db->db_level;
3993 mutex_exit(&db->db_mtx);
3995 if (dcs == DB_DBUF_METADATA_CACHE) {
3996 DBUF_STAT_BUMP(metadata_cache_count);
3997 DBUF_STAT_MAX(metadata_cache_size_bytes_max,
3998 size);
3999 } else {
4000 DBUF_STAT_BUMP(cache_count);
4001 DBUF_STAT_MAX(cache_size_bytes_max, size);
4002 DBUF_STAT_BUMP(cache_levels[db_level]);
4003 DBUF_STAT_INCR(cache_levels_bytes[db_level],
4004 db_size);
4007 if (dcs == DB_DBUF_CACHE && !evicting)
4008 dbuf_evict_notify(size);
4010 } else {
4011 mutex_exit(&db->db_mtx);
4016 #pragma weak dmu_buf_refcount = dbuf_refcount
4017 uint64_t
4018 dbuf_refcount(dmu_buf_impl_t *db)
4020 return (zfs_refcount_count(&db->db_holds));
4023 uint64_t
4024 dmu_buf_user_refcount(dmu_buf_t *db_fake)
4026 uint64_t holds;
4027 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4029 mutex_enter(&db->db_mtx);
4030 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
4031 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
4032 mutex_exit(&db->db_mtx);
4034 return (holds);
4037 void *
4038 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
4039 dmu_buf_user_t *new_user)
4041 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4043 mutex_enter(&db->db_mtx);
4044 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4045 if (db->db_user == old_user)
4046 db->db_user = new_user;
4047 else
4048 old_user = db->db_user;
4049 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4050 mutex_exit(&db->db_mtx);
4052 return (old_user);
4055 void *
4056 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4058 return (dmu_buf_replace_user(db_fake, NULL, user));
4061 void *
4062 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4064 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4066 db->db_user_immediate_evict = TRUE;
4067 return (dmu_buf_set_user(db_fake, user));
4070 void *
4071 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4073 return (dmu_buf_replace_user(db_fake, user, NULL));
4076 void *
4077 dmu_buf_get_user(dmu_buf_t *db_fake)
4079 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4081 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4082 return (db->db_user);
4085 void
4086 dmu_buf_user_evict_wait(void)
4088 taskq_wait(dbu_evict_taskq);
4091 blkptr_t *
4092 dmu_buf_get_blkptr(dmu_buf_t *db)
4094 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4095 return (dbi->db_blkptr);
4098 objset_t *
4099 dmu_buf_get_objset(dmu_buf_t *db)
4101 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4102 return (dbi->db_objset);
4105 dnode_t *
4106 dmu_buf_dnode_enter(dmu_buf_t *db)
4108 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4109 DB_DNODE_ENTER(dbi);
4110 return (DB_DNODE(dbi));
4113 void
4114 dmu_buf_dnode_exit(dmu_buf_t *db)
4116 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4117 DB_DNODE_EXIT(dbi);
4120 static void
4121 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4123 /* ASSERT(dmu_tx_is_syncing(tx) */
4124 ASSERT(MUTEX_HELD(&db->db_mtx));
4126 if (db->db_blkptr != NULL)
4127 return;
4129 if (db->db_blkid == DMU_SPILL_BLKID) {
4130 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4131 BP_ZERO(db->db_blkptr);
4132 return;
4134 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4136 * This buffer was allocated at a time when there was
4137 * no available blkptrs from the dnode, or it was
4138 * inappropriate to hook it in (i.e., nlevels mismatch).
4140 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4141 ASSERT(db->db_parent == NULL);
4142 db->db_parent = dn->dn_dbuf;
4143 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4144 DBUF_VERIFY(db);
4145 } else {
4146 dmu_buf_impl_t *parent = db->db_parent;
4147 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4149 ASSERT(dn->dn_phys->dn_nlevels > 1);
4150 if (parent == NULL) {
4151 mutex_exit(&db->db_mtx);
4152 rw_enter(&dn->dn_struct_rwlock, RW_READER);
4153 parent = dbuf_hold_level(dn, db->db_level + 1,
4154 db->db_blkid >> epbs, db);
4155 rw_exit(&dn->dn_struct_rwlock);
4156 mutex_enter(&db->db_mtx);
4157 db->db_parent = parent;
4159 db->db_blkptr = (blkptr_t *)parent->db.db_data +
4160 (db->db_blkid & ((1ULL << epbs) - 1));
4161 DBUF_VERIFY(db);
4165 static void
4166 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4168 dmu_buf_impl_t *db = dr->dr_dbuf;
4169 void *data = dr->dt.dl.dr_data;
4171 ASSERT0(db->db_level);
4172 ASSERT(MUTEX_HELD(&db->db_mtx));
4173 ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4174 ASSERT(data != NULL);
4176 dnode_t *dn = dr->dr_dnode;
4177 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4178 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4179 memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4181 dbuf_sync_leaf_verify_bonus_dnode(dr);
4183 dbuf_undirty_bonus(dr);
4184 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4188 * When syncing out a blocks of dnodes, adjust the block to deal with
4189 * encryption. Normally, we make sure the block is decrypted before writing
4190 * it. If we have crypt params, then we are writing a raw (encrypted) block,
4191 * from a raw receive. In this case, set the ARC buf's crypt params so
4192 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4194 static void
4195 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4197 int err;
4198 dmu_buf_impl_t *db = dr->dr_dbuf;
4200 ASSERT(MUTEX_HELD(&db->db_mtx));
4201 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4202 ASSERT3U(db->db_level, ==, 0);
4204 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4205 zbookmark_phys_t zb;
4208 * Unfortunately, there is currently no mechanism for
4209 * syncing context to handle decryption errors. An error
4210 * here is only possible if an attacker maliciously
4211 * changed a dnode block and updated the associated
4212 * checksums going up the block tree.
4214 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4215 db->db.db_object, db->db_level, db->db_blkid);
4216 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4217 &zb, B_TRUE);
4218 if (err)
4219 panic("Invalid dnode block MAC");
4220 } else if (dr->dt.dl.dr_has_raw_params) {
4221 (void) arc_release(dr->dt.dl.dr_data, db);
4222 arc_convert_to_raw(dr->dt.dl.dr_data,
4223 dmu_objset_id(db->db_objset),
4224 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4225 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4230 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4231 * is critical the we not allow the compiler to inline this function in to
4232 * dbuf_sync_list() thereby drastically bloating the stack usage.
4234 noinline static void
4235 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4237 dmu_buf_impl_t *db = dr->dr_dbuf;
4238 dnode_t *dn = dr->dr_dnode;
4240 ASSERT(dmu_tx_is_syncing(tx));
4242 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4244 mutex_enter(&db->db_mtx);
4246 ASSERT(db->db_level > 0);
4247 DBUF_VERIFY(db);
4249 /* Read the block if it hasn't been read yet. */
4250 if (db->db_buf == NULL) {
4251 mutex_exit(&db->db_mtx);
4252 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4253 mutex_enter(&db->db_mtx);
4255 ASSERT3U(db->db_state, ==, DB_CACHED);
4256 ASSERT(db->db_buf != NULL);
4258 /* Indirect block size must match what the dnode thinks it is. */
4259 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4260 dbuf_check_blkptr(dn, db);
4262 /* Provide the pending dirty record to child dbufs */
4263 db->db_data_pending = dr;
4265 mutex_exit(&db->db_mtx);
4267 dbuf_write(dr, db->db_buf, tx);
4269 zio_t *zio = dr->dr_zio;
4270 mutex_enter(&dr->dt.di.dr_mtx);
4271 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4272 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4273 mutex_exit(&dr->dt.di.dr_mtx);
4274 zio_nowait(zio);
4278 * Verify that the size of the data in our bonus buffer does not exceed
4279 * its recorded size.
4281 * The purpose of this verification is to catch any cases in development
4282 * where the size of a phys structure (i.e space_map_phys_t) grows and,
4283 * due to incorrect feature management, older pools expect to read more
4284 * data even though they didn't actually write it to begin with.
4286 * For a example, this would catch an error in the feature logic where we
4287 * open an older pool and we expect to write the space map histogram of
4288 * a space map with size SPACE_MAP_SIZE_V0.
4290 static void
4291 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4293 #ifdef ZFS_DEBUG
4294 dnode_t *dn = dr->dr_dnode;
4297 * Encrypted bonus buffers can have data past their bonuslen.
4298 * Skip the verification of these blocks.
4300 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4301 return;
4303 uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4304 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4305 ASSERT3U(bonuslen, <=, maxbonuslen);
4307 arc_buf_t *datap = dr->dt.dl.dr_data;
4308 char *datap_end = ((char *)datap) + bonuslen;
4309 char *datap_max = ((char *)datap) + maxbonuslen;
4311 /* ensure that everything is zero after our data */
4312 for (; datap_end < datap_max; datap_end++)
4313 ASSERT(*datap_end == 0);
4314 #endif
4317 static blkptr_t *
4318 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4320 /* This must be a lightweight dirty record. */
4321 ASSERT3P(dr->dr_dbuf, ==, NULL);
4322 dnode_t *dn = dr->dr_dnode;
4324 if (dn->dn_phys->dn_nlevels == 1) {
4325 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4326 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4327 } else {
4328 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4329 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4330 VERIFY3U(parent_db->db_level, ==, 1);
4331 VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4332 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4333 blkptr_t *bp = parent_db->db.db_data;
4334 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4338 static void
4339 dbuf_lightweight_ready(zio_t *zio)
4341 dbuf_dirty_record_t *dr = zio->io_private;
4342 blkptr_t *bp = zio->io_bp;
4344 if (zio->io_error != 0)
4345 return;
4347 dnode_t *dn = dr->dr_dnode;
4349 blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4350 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4351 int64_t delta = bp_get_dsize_sync(spa, bp) -
4352 bp_get_dsize_sync(spa, bp_orig);
4353 dnode_diduse_space(dn, delta);
4355 uint64_t blkid = dr->dt.dll.dr_blkid;
4356 mutex_enter(&dn->dn_mtx);
4357 if (blkid > dn->dn_phys->dn_maxblkid) {
4358 ASSERT0(dn->dn_objset->os_raw_receive);
4359 dn->dn_phys->dn_maxblkid = blkid;
4361 mutex_exit(&dn->dn_mtx);
4363 if (!BP_IS_EMBEDDED(bp)) {
4364 uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4365 BP_SET_FILL(bp, fill);
4368 dmu_buf_impl_t *parent_db;
4369 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4370 if (dr->dr_parent == NULL) {
4371 parent_db = dn->dn_dbuf;
4372 } else {
4373 parent_db = dr->dr_parent->dr_dbuf;
4375 rw_enter(&parent_db->db_rwlock, RW_WRITER);
4376 *bp_orig = *bp;
4377 rw_exit(&parent_db->db_rwlock);
4380 static void
4381 dbuf_lightweight_done(zio_t *zio)
4383 dbuf_dirty_record_t *dr = zio->io_private;
4385 VERIFY0(zio->io_error);
4387 objset_t *os = dr->dr_dnode->dn_objset;
4388 dmu_tx_t *tx = os->os_synctx;
4390 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4391 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4392 } else {
4393 dsl_dataset_t *ds = os->os_dsl_dataset;
4394 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4395 dsl_dataset_block_born(ds, zio->io_bp, tx);
4398 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4399 zio->io_txg);
4401 abd_free(dr->dt.dll.dr_abd);
4402 kmem_free(dr, sizeof (*dr));
4405 noinline static void
4406 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4408 dnode_t *dn = dr->dr_dnode;
4409 zio_t *pio;
4410 if (dn->dn_phys->dn_nlevels == 1) {
4411 pio = dn->dn_zio;
4412 } else {
4413 pio = dr->dr_parent->dr_zio;
4416 zbookmark_phys_t zb = {
4417 .zb_objset = dmu_objset_id(dn->dn_objset),
4418 .zb_object = dn->dn_object,
4419 .zb_level = 0,
4420 .zb_blkid = dr->dt.dll.dr_blkid,
4424 * See comment in dbuf_write(). This is so that zio->io_bp_orig
4425 * will have the old BP in dbuf_lightweight_done().
4427 dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4429 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4430 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4431 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4432 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4433 dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
4434 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4436 zio_nowait(dr->dr_zio);
4440 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4441 * critical the we not allow the compiler to inline this function in to
4442 * dbuf_sync_list() thereby drastically bloating the stack usage.
4444 noinline static void
4445 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4447 arc_buf_t **datap = &dr->dt.dl.dr_data;
4448 dmu_buf_impl_t *db = dr->dr_dbuf;
4449 dnode_t *dn = dr->dr_dnode;
4450 objset_t *os;
4451 uint64_t txg = tx->tx_txg;
4453 ASSERT(dmu_tx_is_syncing(tx));
4455 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4457 mutex_enter(&db->db_mtx);
4459 * To be synced, we must be dirtied. But we
4460 * might have been freed after the dirty.
4462 if (db->db_state == DB_UNCACHED) {
4463 /* This buffer has been freed since it was dirtied */
4464 ASSERT(db->db.db_data == NULL);
4465 } else if (db->db_state == DB_FILL) {
4466 /* This buffer was freed and is now being re-filled */
4467 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4468 } else if (db->db_state == DB_READ) {
4470 * This buffer has a clone we need to write, and an in-flight
4471 * read on the BP we're about to clone. Its safe to issue the
4472 * write here because the read has already been issued and the
4473 * contents won't change.
4475 ASSERT(dr->dt.dl.dr_brtwrite &&
4476 dr->dt.dl.dr_override_state == DR_OVERRIDDEN);
4477 } else {
4478 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4480 DBUF_VERIFY(db);
4482 if (db->db_blkid == DMU_SPILL_BLKID) {
4483 mutex_enter(&dn->dn_mtx);
4484 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4486 * In the previous transaction group, the bonus buffer
4487 * was entirely used to store the attributes for the
4488 * dnode which overrode the dn_spill field. However,
4489 * when adding more attributes to the file a spill
4490 * block was required to hold the extra attributes.
4492 * Make sure to clear the garbage left in the dn_spill
4493 * field from the previous attributes in the bonus
4494 * buffer. Otherwise, after writing out the spill
4495 * block to the new allocated dva, it will free
4496 * the old block pointed to by the invalid dn_spill.
4498 db->db_blkptr = NULL;
4500 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4501 mutex_exit(&dn->dn_mtx);
4505 * If this is a bonus buffer, simply copy the bonus data into the
4506 * dnode. It will be written out when the dnode is synced (and it
4507 * will be synced, since it must have been dirty for dbuf_sync to
4508 * be called).
4510 if (db->db_blkid == DMU_BONUS_BLKID) {
4511 ASSERT(dr->dr_dbuf == db);
4512 dbuf_sync_bonus(dr, tx);
4513 return;
4516 os = dn->dn_objset;
4519 * This function may have dropped the db_mtx lock allowing a dmu_sync
4520 * operation to sneak in. As a result, we need to ensure that we
4521 * don't check the dr_override_state until we have returned from
4522 * dbuf_check_blkptr.
4524 dbuf_check_blkptr(dn, db);
4527 * If this buffer is in the middle of an immediate write,
4528 * wait for the synchronous IO to complete.
4530 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4531 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4532 cv_wait(&db->db_changed, &db->db_mtx);
4536 * If this is a dnode block, ensure it is appropriately encrypted
4537 * or decrypted, depending on what we are writing to it this txg.
4539 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4540 dbuf_prepare_encrypted_dnode_leaf(dr);
4542 if (db->db_state != DB_NOFILL &&
4543 dn->dn_object != DMU_META_DNODE_OBJECT &&
4544 zfs_refcount_count(&db->db_holds) > 1 &&
4545 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4546 *datap == db->db_buf) {
4548 * If this buffer is currently "in use" (i.e., there
4549 * are active holds and db_data still references it),
4550 * then make a copy before we start the write so that
4551 * any modifications from the open txg will not leak
4552 * into this write.
4554 * NOTE: this copy does not need to be made for
4555 * objects only modified in the syncing context (e.g.
4556 * DNONE_DNODE blocks).
4558 int psize = arc_buf_size(*datap);
4559 int lsize = arc_buf_lsize(*datap);
4560 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4561 enum zio_compress compress_type = arc_get_compression(*datap);
4562 uint8_t complevel = arc_get_complevel(*datap);
4564 if (arc_is_encrypted(*datap)) {
4565 boolean_t byteorder;
4566 uint8_t salt[ZIO_DATA_SALT_LEN];
4567 uint8_t iv[ZIO_DATA_IV_LEN];
4568 uint8_t mac[ZIO_DATA_MAC_LEN];
4570 arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4571 *datap = arc_alloc_raw_buf(os->os_spa, db,
4572 dmu_objset_id(os), byteorder, salt, iv, mac,
4573 dn->dn_type, psize, lsize, compress_type,
4574 complevel);
4575 } else if (compress_type != ZIO_COMPRESS_OFF) {
4576 ASSERT3U(type, ==, ARC_BUFC_DATA);
4577 *datap = arc_alloc_compressed_buf(os->os_spa, db,
4578 psize, lsize, compress_type, complevel);
4579 } else {
4580 *datap = arc_alloc_buf(os->os_spa, db, type, psize);
4582 memcpy((*datap)->b_data, db->db.db_data, psize);
4584 db->db_data_pending = dr;
4586 mutex_exit(&db->db_mtx);
4588 dbuf_write(dr, *datap, tx);
4590 ASSERT(!list_link_active(&dr->dr_dirty_node));
4591 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4592 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4593 } else {
4594 zio_nowait(dr->dr_zio);
4598 void
4599 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4601 dbuf_dirty_record_t *dr;
4603 while ((dr = list_head(list))) {
4604 if (dr->dr_zio != NULL) {
4606 * If we find an already initialized zio then we
4607 * are processing the meta-dnode, and we have finished.
4608 * The dbufs for all dnodes are put back on the list
4609 * during processing, so that we can zio_wait()
4610 * these IOs after initiating all child IOs.
4612 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4613 DMU_META_DNODE_OBJECT);
4614 break;
4616 list_remove(list, dr);
4617 if (dr->dr_dbuf == NULL) {
4618 dbuf_sync_lightweight(dr, tx);
4619 } else {
4620 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4621 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4622 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4624 if (dr->dr_dbuf->db_level > 0)
4625 dbuf_sync_indirect(dr, tx);
4626 else
4627 dbuf_sync_leaf(dr, tx);
4632 static void
4633 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4635 (void) buf;
4636 dmu_buf_impl_t *db = vdb;
4637 dnode_t *dn;
4638 blkptr_t *bp = zio->io_bp;
4639 blkptr_t *bp_orig = &zio->io_bp_orig;
4640 spa_t *spa = zio->io_spa;
4641 int64_t delta;
4642 uint64_t fill = 0;
4643 int i;
4645 ASSERT3P(db->db_blkptr, !=, NULL);
4646 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4648 DB_DNODE_ENTER(db);
4649 dn = DB_DNODE(db);
4650 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4651 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4652 zio->io_prev_space_delta = delta;
4654 if (bp->blk_birth != 0) {
4655 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4656 BP_GET_TYPE(bp) == dn->dn_type) ||
4657 (db->db_blkid == DMU_SPILL_BLKID &&
4658 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4659 BP_IS_EMBEDDED(bp));
4660 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4663 mutex_enter(&db->db_mtx);
4665 #ifdef ZFS_DEBUG
4666 if (db->db_blkid == DMU_SPILL_BLKID) {
4667 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4668 ASSERT(!(BP_IS_HOLE(bp)) &&
4669 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4671 #endif
4673 if (db->db_level == 0) {
4674 mutex_enter(&dn->dn_mtx);
4675 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4676 db->db_blkid != DMU_SPILL_BLKID) {
4677 ASSERT0(db->db_objset->os_raw_receive);
4678 dn->dn_phys->dn_maxblkid = db->db_blkid;
4680 mutex_exit(&dn->dn_mtx);
4682 if (dn->dn_type == DMU_OT_DNODE) {
4683 i = 0;
4684 while (i < db->db.db_size) {
4685 dnode_phys_t *dnp =
4686 (void *)(((char *)db->db.db_data) + i);
4688 i += DNODE_MIN_SIZE;
4689 if (dnp->dn_type != DMU_OT_NONE) {
4690 fill++;
4691 for (int j = 0; j < dnp->dn_nblkptr;
4692 j++) {
4693 (void) zfs_blkptr_verify(spa,
4694 &dnp->dn_blkptr[j],
4695 BLK_CONFIG_SKIP,
4696 BLK_VERIFY_HALT);
4698 if (dnp->dn_flags &
4699 DNODE_FLAG_SPILL_BLKPTR) {
4700 (void) zfs_blkptr_verify(spa,
4701 DN_SPILL_BLKPTR(dnp),
4702 BLK_CONFIG_SKIP,
4703 BLK_VERIFY_HALT);
4705 i += dnp->dn_extra_slots *
4706 DNODE_MIN_SIZE;
4709 } else {
4710 if (BP_IS_HOLE(bp)) {
4711 fill = 0;
4712 } else {
4713 fill = 1;
4716 } else {
4717 blkptr_t *ibp = db->db.db_data;
4718 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4719 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4720 if (BP_IS_HOLE(ibp))
4721 continue;
4722 (void) zfs_blkptr_verify(spa, ibp,
4723 BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
4724 fill += BP_GET_FILL(ibp);
4727 DB_DNODE_EXIT(db);
4729 if (!BP_IS_EMBEDDED(bp))
4730 BP_SET_FILL(bp, fill);
4732 mutex_exit(&db->db_mtx);
4734 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4735 *db->db_blkptr = *bp;
4736 dmu_buf_unlock_parent(db, dblt, FTAG);
4740 * This function gets called just prior to running through the compression
4741 * stage of the zio pipeline. If we're an indirect block comprised of only
4742 * holes, then we want this indirect to be compressed away to a hole. In
4743 * order to do that we must zero out any information about the holes that
4744 * this indirect points to prior to before we try to compress it.
4746 static void
4747 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4749 (void) zio, (void) buf;
4750 dmu_buf_impl_t *db = vdb;
4751 dnode_t *dn;
4752 blkptr_t *bp;
4753 unsigned int epbs, i;
4755 ASSERT3U(db->db_level, >, 0);
4756 DB_DNODE_ENTER(db);
4757 dn = DB_DNODE(db);
4758 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4759 ASSERT3U(epbs, <, 31);
4761 /* Determine if all our children are holes */
4762 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4763 if (!BP_IS_HOLE(bp))
4764 break;
4768 * If all the children are holes, then zero them all out so that
4769 * we may get compressed away.
4771 if (i == 1ULL << epbs) {
4773 * We only found holes. Grab the rwlock to prevent
4774 * anybody from reading the blocks we're about to
4775 * zero out.
4777 rw_enter(&db->db_rwlock, RW_WRITER);
4778 memset(db->db.db_data, 0, db->db.db_size);
4779 rw_exit(&db->db_rwlock);
4781 DB_DNODE_EXIT(db);
4784 static void
4785 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4787 (void) buf;
4788 dmu_buf_impl_t *db = vdb;
4789 blkptr_t *bp_orig = &zio->io_bp_orig;
4790 blkptr_t *bp = db->db_blkptr;
4791 objset_t *os = db->db_objset;
4792 dmu_tx_t *tx = os->os_synctx;
4794 ASSERT0(zio->io_error);
4795 ASSERT(db->db_blkptr == bp);
4798 * For nopwrites and rewrites we ensure that the bp matches our
4799 * original and bypass all the accounting.
4801 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4802 ASSERT(BP_EQUAL(bp, bp_orig));
4803 } else {
4804 dsl_dataset_t *ds = os->os_dsl_dataset;
4805 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4806 dsl_dataset_block_born(ds, bp, tx);
4809 mutex_enter(&db->db_mtx);
4811 DBUF_VERIFY(db);
4813 dbuf_dirty_record_t *dr = db->db_data_pending;
4814 dnode_t *dn = dr->dr_dnode;
4815 ASSERT(!list_link_active(&dr->dr_dirty_node));
4816 ASSERT(dr->dr_dbuf == db);
4817 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4818 list_remove(&db->db_dirty_records, dr);
4820 #ifdef ZFS_DEBUG
4821 if (db->db_blkid == DMU_SPILL_BLKID) {
4822 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4823 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4824 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4826 #endif
4828 if (db->db_level == 0) {
4829 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4830 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4831 if (db->db_state != DB_NOFILL) {
4832 if (dr->dt.dl.dr_data != NULL &&
4833 dr->dt.dl.dr_data != db->db_buf) {
4834 arc_buf_destroy(dr->dt.dl.dr_data, db);
4837 } else {
4838 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4839 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4840 if (!BP_IS_HOLE(db->db_blkptr)) {
4841 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4842 SPA_BLKPTRSHIFT;
4843 ASSERT3U(db->db_blkid, <=,
4844 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4845 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4846 db->db.db_size);
4848 mutex_destroy(&dr->dt.di.dr_mtx);
4849 list_destroy(&dr->dt.di.dr_children);
4852 cv_broadcast(&db->db_changed);
4853 ASSERT(db->db_dirtycnt > 0);
4854 db->db_dirtycnt -= 1;
4855 db->db_data_pending = NULL;
4856 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4858 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4859 zio->io_txg);
4861 kmem_free(dr, sizeof (dbuf_dirty_record_t));
4864 static void
4865 dbuf_write_nofill_ready(zio_t *zio)
4867 dbuf_write_ready(zio, NULL, zio->io_private);
4870 static void
4871 dbuf_write_nofill_done(zio_t *zio)
4873 dbuf_write_done(zio, NULL, zio->io_private);
4876 static void
4877 dbuf_write_override_ready(zio_t *zio)
4879 dbuf_dirty_record_t *dr = zio->io_private;
4880 dmu_buf_impl_t *db = dr->dr_dbuf;
4882 dbuf_write_ready(zio, NULL, db);
4885 static void
4886 dbuf_write_override_done(zio_t *zio)
4888 dbuf_dirty_record_t *dr = zio->io_private;
4889 dmu_buf_impl_t *db = dr->dr_dbuf;
4890 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4892 mutex_enter(&db->db_mtx);
4893 if (!BP_EQUAL(zio->io_bp, obp)) {
4894 if (!BP_IS_HOLE(obp))
4895 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4896 arc_release(dr->dt.dl.dr_data, db);
4898 mutex_exit(&db->db_mtx);
4900 dbuf_write_done(zio, NULL, db);
4902 if (zio->io_abd != NULL)
4903 abd_free(zio->io_abd);
4906 typedef struct dbuf_remap_impl_callback_arg {
4907 objset_t *drica_os;
4908 uint64_t drica_blk_birth;
4909 dmu_tx_t *drica_tx;
4910 } dbuf_remap_impl_callback_arg_t;
4912 static void
4913 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4914 void *arg)
4916 dbuf_remap_impl_callback_arg_t *drica = arg;
4917 objset_t *os = drica->drica_os;
4918 spa_t *spa = dmu_objset_spa(os);
4919 dmu_tx_t *tx = drica->drica_tx;
4921 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4923 if (os == spa_meta_objset(spa)) {
4924 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4925 } else {
4926 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4927 size, drica->drica_blk_birth, tx);
4931 static void
4932 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4934 blkptr_t bp_copy = *bp;
4935 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4936 dbuf_remap_impl_callback_arg_t drica;
4938 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4940 drica.drica_os = dn->dn_objset;
4941 drica.drica_blk_birth = bp->blk_birth;
4942 drica.drica_tx = tx;
4943 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4944 &drica)) {
4946 * If the blkptr being remapped is tracked by a livelist,
4947 * then we need to make sure the livelist reflects the update.
4948 * First, cancel out the old blkptr by appending a 'FREE'
4949 * entry. Next, add an 'ALLOC' to track the new version. This
4950 * way we avoid trying to free an inaccurate blkptr at delete.
4951 * Note that embedded blkptrs are not tracked in livelists.
4953 if (dn->dn_objset != spa_meta_objset(spa)) {
4954 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
4955 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
4956 bp->blk_birth > ds->ds_dir->dd_origin_txg) {
4957 ASSERT(!BP_IS_EMBEDDED(bp));
4958 ASSERT(dsl_dir_is_clone(ds->ds_dir));
4959 ASSERT(spa_feature_is_enabled(spa,
4960 SPA_FEATURE_LIVELIST));
4961 bplist_append(&ds->ds_dir->dd_pending_frees,
4962 bp);
4963 bplist_append(&ds->ds_dir->dd_pending_allocs,
4964 &bp_copy);
4969 * The db_rwlock prevents dbuf_read_impl() from
4970 * dereferencing the BP while we are changing it. To
4971 * avoid lock contention, only grab it when we are actually
4972 * changing the BP.
4974 if (rw != NULL)
4975 rw_enter(rw, RW_WRITER);
4976 *bp = bp_copy;
4977 if (rw != NULL)
4978 rw_exit(rw);
4983 * Remap any existing BP's to concrete vdevs, if possible.
4985 static void
4986 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4988 spa_t *spa = dmu_objset_spa(db->db_objset);
4989 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4991 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4992 return;
4994 if (db->db_level > 0) {
4995 blkptr_t *bp = db->db.db_data;
4996 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4997 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
4999 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
5000 dnode_phys_t *dnp = db->db.db_data;
5001 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
5002 DMU_OT_DNODE);
5003 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
5004 i += dnp[i].dn_extra_slots + 1) {
5005 for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
5006 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
5007 &dn->dn_dbuf->db_rwlock);
5008 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
5009 tx);
5016 /* Issue I/O to commit a dirty buffer to disk. */
5017 static void
5018 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
5020 dmu_buf_impl_t *db = dr->dr_dbuf;
5021 dnode_t *dn = dr->dr_dnode;
5022 objset_t *os;
5023 dmu_buf_impl_t *parent = db->db_parent;
5024 uint64_t txg = tx->tx_txg;
5025 zbookmark_phys_t zb;
5026 zio_prop_t zp;
5027 zio_t *pio; /* parent I/O */
5028 int wp_flag = 0;
5030 ASSERT(dmu_tx_is_syncing(tx));
5032 os = dn->dn_objset;
5034 if (db->db_state != DB_NOFILL) {
5035 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
5037 * Private object buffers are released here rather
5038 * than in dbuf_dirty() since they are only modified
5039 * in the syncing context and we don't want the
5040 * overhead of making multiple copies of the data.
5042 if (BP_IS_HOLE(db->db_blkptr)) {
5043 arc_buf_thaw(data);
5044 } else {
5045 dbuf_release_bp(db);
5047 dbuf_remap(dn, db, tx);
5051 if (parent != dn->dn_dbuf) {
5052 /* Our parent is an indirect block. */
5053 /* We have a dirty parent that has been scheduled for write. */
5054 ASSERT(parent && parent->db_data_pending);
5055 /* Our parent's buffer is one level closer to the dnode. */
5056 ASSERT(db->db_level == parent->db_level-1);
5058 * We're about to modify our parent's db_data by modifying
5059 * our block pointer, so the parent must be released.
5061 ASSERT(arc_released(parent->db_buf));
5062 pio = parent->db_data_pending->dr_zio;
5063 } else {
5064 /* Our parent is the dnode itself. */
5065 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
5066 db->db_blkid != DMU_SPILL_BLKID) ||
5067 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
5068 if (db->db_blkid != DMU_SPILL_BLKID)
5069 ASSERT3P(db->db_blkptr, ==,
5070 &dn->dn_phys->dn_blkptr[db->db_blkid]);
5071 pio = dn->dn_zio;
5074 ASSERT(db->db_level == 0 || data == db->db_buf);
5075 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
5076 ASSERT(pio);
5078 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
5079 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
5080 db->db.db_object, db->db_level, db->db_blkid);
5082 if (db->db_blkid == DMU_SPILL_BLKID)
5083 wp_flag = WP_SPILL;
5084 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
5086 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
5089 * We copy the blkptr now (rather than when we instantiate the dirty
5090 * record), because its value can change between open context and
5091 * syncing context. We do not need to hold dn_struct_rwlock to read
5092 * db_blkptr because we are in syncing context.
5094 dr->dr_bp_copy = *db->db_blkptr;
5096 if (db->db_level == 0 &&
5097 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
5099 * The BP for this block has been provided by open context
5100 * (by dmu_sync() or dmu_buf_write_embedded()).
5102 abd_t *contents = (data != NULL) ?
5103 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
5105 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
5106 contents, db->db.db_size, db->db.db_size, &zp,
5107 dbuf_write_override_ready, NULL,
5108 dbuf_write_override_done,
5109 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5110 mutex_enter(&db->db_mtx);
5111 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
5112 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
5113 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
5114 dr->dt.dl.dr_brtwrite);
5115 mutex_exit(&db->db_mtx);
5116 } else if (db->db_state == DB_NOFILL) {
5117 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
5118 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
5119 dr->dr_zio = zio_write(pio, os->os_spa, txg,
5120 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
5121 dbuf_write_nofill_ready, NULL,
5122 dbuf_write_nofill_done, db,
5123 ZIO_PRIORITY_ASYNC_WRITE,
5124 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
5125 } else {
5126 ASSERT(arc_released(data));
5129 * For indirect blocks, we want to setup the children
5130 * ready callback so that we can properly handle an indirect
5131 * block that only contains holes.
5133 arc_write_done_func_t *children_ready_cb = NULL;
5134 if (db->db_level != 0)
5135 children_ready_cb = dbuf_write_children_ready;
5137 dr->dr_zio = arc_write(pio, os->os_spa, txg,
5138 &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
5139 dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
5140 children_ready_cb, dbuf_write_done, db,
5141 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5145 EXPORT_SYMBOL(dbuf_find);
5146 EXPORT_SYMBOL(dbuf_is_metadata);
5147 EXPORT_SYMBOL(dbuf_destroy);
5148 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5149 EXPORT_SYMBOL(dbuf_whichblock);
5150 EXPORT_SYMBOL(dbuf_read);
5151 EXPORT_SYMBOL(dbuf_unoverride);
5152 EXPORT_SYMBOL(dbuf_free_range);
5153 EXPORT_SYMBOL(dbuf_new_size);
5154 EXPORT_SYMBOL(dbuf_release_bp);
5155 EXPORT_SYMBOL(dbuf_dirty);
5156 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5157 EXPORT_SYMBOL(dmu_buf_will_dirty);
5158 EXPORT_SYMBOL(dmu_buf_is_dirty);
5159 EXPORT_SYMBOL(dmu_buf_will_clone);
5160 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5161 EXPORT_SYMBOL(dmu_buf_will_fill);
5162 EXPORT_SYMBOL(dmu_buf_fill_done);
5163 EXPORT_SYMBOL(dmu_buf_rele);
5164 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5165 EXPORT_SYMBOL(dbuf_prefetch);
5166 EXPORT_SYMBOL(dbuf_hold_impl);
5167 EXPORT_SYMBOL(dbuf_hold);
5168 EXPORT_SYMBOL(dbuf_hold_level);
5169 EXPORT_SYMBOL(dbuf_create_bonus);
5170 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5171 EXPORT_SYMBOL(dbuf_rm_spill);
5172 EXPORT_SYMBOL(dbuf_add_ref);
5173 EXPORT_SYMBOL(dbuf_rele);
5174 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5175 EXPORT_SYMBOL(dbuf_refcount);
5176 EXPORT_SYMBOL(dbuf_sync_list);
5177 EXPORT_SYMBOL(dmu_buf_set_user);
5178 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5179 EXPORT_SYMBOL(dmu_buf_get_user);
5180 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5182 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
5183 "Maximum size in bytes of the dbuf cache.");
5185 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5186 "Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
5188 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5189 "Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
5191 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
5192 "Maximum size in bytes of dbuf metadata cache.");
5194 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
5195 "Set size of dbuf cache to log2 fraction of arc size.");
5197 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
5198 "Set size of dbuf metadata cache to log2 fraction of arc size.");
5200 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
5201 "Set size of dbuf cache mutex array as log2 shift.");