/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2020 Oxide Computer Company
 */
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
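/*
 * Increase the indirection depth of this dnode: allocate a new top-level
 * indirect block, copy the dnode's block pointers into it, and reparent
 * any cached child dbufs onto the new block. Called from dnode_sync()
 * when open context has recorded a new value in dn_next_nlevels[].
 */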
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    (u_longlong_t)dn->dn_object, dn->dn_phys->dn_nlevels);
	/*
	 * Lock ordering requires that we hold the children's db_mutexes (by
	 * calling dbuf_find()) before holding the parent's db_rwlock. The lock
	 * order is imposed by dbuf_read's steps of "grab the lock to protect
	 * db_parent, get db_parent, hold db_parent's db_rwlock".
	 */
	dmu_buf_impl_t *children[DN_MAX_NBLKPTR];
	ASSERT3U(nblkptr, <=, DN_MAX_NBLKPTR);
	for (i = 0; i < nblkptr; i++) {
		children[i] = dbuf_find(dn->dn_objset, dn->dn_object,
		    old_toplvl, i, NULL);
	}
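	/*
	 * Note: dbuf_find() returns each cached child with its db_mtx held
	 * (or NULL if the child is not cached); each held mutex is released
	 * in the reparenting loop below.
	 */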
	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	if (dn->dn_dbuf != NULL)
		rw_enter(&dn->dn_dbuf->db_rwlock, RW_WRITER);
	rw_enter(&db->db_rwlock, RW_WRITER);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	memcpy(db->db.db_data, dn->dn_phys->dn_blkptr,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);
	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = children[i];

		if (child == NULL)
			continue;
#ifdef ZFS_DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}
	memset(dn->dn_phys->dn_blkptr, 0, sizeof (blkptr_t) * nblkptr);

	rw_exit(&db->db_rwlock);
	if (dn->dn_dbuf != NULL)
		rw_exit(&dn->dn_dbuf->db_rwlock);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
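/*
 * Free (as holes) the given run of block pointers, returning the space
 * to the dataset via dsl_dataset_block_kill() and debiting the dnode's
 * space accounting via dnode_diduse_space().
 */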
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, (u_longlong_t)dn->dn_object,
	    num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		memset(bp, 0, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}
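/*
 * Debug-only sanity check, reached through FREE_VERIFY() in
 * free_children() (evidently a macro that compiles away in non-debug
 * builds): every level-0 block beneath this L1 indirect block in the
 * range being freed must contain only zeros, both in its dirty record
 * for this txg and in its cached buffer, unless the buffer has been
 * re-dirtied in a future txg.
 */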
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	uint64_t off, num, i, j;
	unsigned int epbs;
	int err;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(dn->dn_phys->dn_indblkshift, >=, SPA_BLKPTRSHIFT);
	ASSERT3U(end + 1, >=, start);
	ASSERT3U(start, >=, (db->db_blkid << epbs));
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);
	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level - 1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = dbuf_find_dirty_eq(child, txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%llu off=%llu "
					    "num=%llu\n",
					    (void *)child, (u_longlong_t)i,
					    (u_longlong_t)off,
					    (u_longlong_t)num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    list_is_empty(&child->db_dirty_records)) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%llu off=%llu "
					    "num=%llu\n",
					    (void *)child, (u_longlong_t)i,
					    (u_longlong_t)off,
					    (u_longlong_t)num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif
/*
 * We don't usually free the indirect blocks here. If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times. Therefore, we don't free any indirect
 * blocks in free_children(). If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready, which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion. In
 * addition, we know the case described above cannot occur, because the dnode
 * is being freed. Therefore, we free the indirect blocks immediately in that
 * case.
 */
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	/*
	 * If we modify this indirect block, and we are not freeing the
	 * dnode (!free_indirects), then this indirect block needs to get
	 * written to disk by dbuf_write(). If it is dirty, we know it will
	 * be written (otherwise, we would have incorrect on-disk state
	 * because the space would be freed but still referenced by the BP
	 * in this indirect block). Therefore we VERIFY that it is
	 * dirty.
	 *
	 * Our VERIFY covers some cases that do not actually have to be
	 * dirty, but the open-context code happens to dirty. E.g. if the
	 * blocks we are freeing are all holes, because in that case, we
	 * are only freeing part of this indirect block, so it is an
	 * ancestor of the first or last block to be freed. The first and
	 * last L1 indirect blocks are always dirtied by dnode_free_range().
	 */
	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
	VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);
	dmu_buf_unlock_parent(db, dblt, FTAG);
	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);
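	/*
	 * At this point [start, end] is the portion of the L0 range
	 * [blkid, blkid + nblks) covered by this indirect block,
	 * expressed in block ids at level db_level - 1, and bp points
	 * at the child blkptr for "start". For example, with epbs = 10
	 * each child of an L2 block spans 2^10 L0 blocks, so shift = 10
	 * and L0 block numbers are divided by 1024 to get child ids.
	 */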
	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		rw_enter(&db->db_rwlock, RW_WRITER);
		free_blocks(dn, bp, end - start + 1, tx);
		rw_exit(&db->db_rwlock);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, free_indirects, tx);
			dbuf_rele(subdb, FTAG);
		}
	}
	if (free_indirects) {
		rw_enter(&db->db_rwlock, RW_WRITER);
		for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
			ASSERT(BP_IS_HOLE(bp));
		memset(db->db.db_data, 0, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
		rw_exit(&db->db_rwlock);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}
/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);
			free_children(db, blkid, nblks, free_indirects, tx);
			dbuf_rele(db, FTAG);
		}
	}
	/*
	 * Do not truncate the maxblkid if we are performing a raw
	 * receive. The raw receive sets the maxblkid manually and
	 * must not be overridden. Usually, the last DRR_FREE record
	 * will be at the maxblkid, because the source system sets
	 * the maxblkid when truncating. However, if the last block
	 * was freed by overwriting with zeros and being compressed
	 * away to a hole, the source system will generate a DRR_FREE
	 * record while leaving the maxblkid after the end of that
	 * record. In this case we need to leave the maxblkid as
	 * indicated in the DRR_OBJECT record, so that it matches the
	 * source system, ensuring that the cryptographic hashes will
	 * match.
	 */
	if (trunc && !dn->dn_objset->os_raw_receive) {
		uint64_t off __maybe_unused;
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
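/*
 * range_tree_walk() callbacks receive only (arg, offset, length), so the
 * remaining arguments for dnode_sync_free_range_impl() are bundled into
 * this struct and passed through the opaque arg pointer.
 */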
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
	boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;
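/*
 * The caller (dnode_sync()) holds dn_mtx across the range_tree_walk();
 * drop it around the actual free, which takes dbuf locks and can block
 * on I/O, then reacquire it before returning to the walk.
 */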
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks,
	    dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}
/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t *db_marker;
	dmu_buf_impl_t *db, *db_next;

	db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef ZFS_DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    zfs_refcount_is_zero(&db->db_holds)) {
			db_marker->db_level = db->db_level;
			db_marker->db_blkid = db->db_blkid;
			db_marker->db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, db_marker, db,
			    AVL_BEFORE);

			/*
			 * We need to use the "marker" dbuf rather than
			 * simply getting the next dbuf, because
			 * dbuf_destroy() may actually remove multiple dbufs.
			 * It can call itself recursively on the parent dbuf,
			 * which may also be removed from dn_dbufs. The code
			 * flow would look like:
			 *
			 * dbuf_destroy():
			 *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
			 *	if (!cacheable || pending_evict)
			 *	  dbuf_destroy()
			 */
			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
			avl_remove(&dn->dn_dbufs, db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	kmem_free(db_marker, sizeof (dmu_buf_impl_t));

	dnode_evict_bonus(dn);
}
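/*
 * Evict the bonus buffer if nothing holds it; otherwise mark it
 * db_pending_evict so it is destroyed when the last hold is released.
 */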
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}
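/*
 * Discard the dirty records for a dnode that is being freed: recursively
 * undirty any indirect children first, then detach each dirty record and
 * drop the hold it had on its dbuf.
 */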
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(list_head(&db->db_dirty_records) == dr);
		list_remove_head(&db->db_dirty_records);
		ASSERT(list_is_empty(&db->db_dirty_records));
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
	}
}
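/*
 * Final syncing-context teardown for a dnode freed in this txg: discard
 * its dirty state, evict its dbufs, zero the on-disk dnode_phys_t, and
 * reset the in-core dnode before dropping the syncing hold.
 */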
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;
	dn->dn_next_maxblkid[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	memset(dn->dn_phys, 0, sizeof (dnode_phys_t) * dn->dn_num_slots);
	dnode_free_interior_slots(dn);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	dn->dn_num_slots = 1;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}
/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn __maybe_unused = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    memcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));
	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else if (!(os->os_encrypted && dmu_objset_is_receiving(os))) {
		/*
		 * Once we account for it, we should always account for it,
		 * except for the case of a raw receive. We will not be able
		 * to account for it until the receiving dataset has been
		 * mounted.
		 */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}
	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);
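	/*
	 * Each dn_next_*[] slot below records a property change made in
	 * open context for this txg; apply it to the on-disk dnode and
	 * clear the slot so it can be reused in a later txg.
	 */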
	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}
	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}
	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}
	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		dsfra.dsfra_free_indirects = freeing_dnode;
		mutex_enter(&dn->dn_mtx);
		if (freeing_dnode) {
			ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
			    0, dn->dn_maxblkid + 1));
		}
		/*
		 * Because dnode_sync_free_range() must drop dn_mtx during its
		 * processing, using it as a callback to range_tree_vacate() is
		 * not safe. No other operations (besides destroy) are allowed
		 * once range_tree_vacate() has begun, and dropping dn_mtx
		 * would leave a window open for another thread to observe that
		 * invalid (and unsafe) state.
		 */
		range_tree_walk(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}
	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation[SPA_FEATURE_LARGE_DNODE] =
		    (void *)B_TRUE;
		mutex_exit(&ds->ds_lock);
	}
	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	/*
	 * This must be done after dnode_sync_free_range()
	 * and dnode_increase_indirection(). See dnode_new_blkid()
	 * for an explanation of the high bit being set.
	 */
	if (dn->dn_next_maxblkid[txgoff]) {
		mutex_enter(&dn->dn_mtx);
		dnp->dn_maxblkid =
		    dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET;
		dn->dn_next_maxblkid[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}
	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			memset(dnp->dn_blkptr + dnp->dn_nblkptr, 0,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = 0; i < dnp->dn_nblkptr; i++) {
				if (i >= dn->dn_next_nblkptr[txgoff])
					ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
			}
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}
	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	ASSERT3U(dnp->dn_bonuslen, <=, DN_MAX_BONUS_LEN(dnp));

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf. Additionally, the caller
	 * has already added a reference to the dnode because it's on the
	 * os_synced_dnodes list.
	 */
}