/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2020 Oxide Computer Company
 */
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    (u_longlong_t)dn->dn_object, dn->dn_phys->dn_nlevels);
	/*
	 * Lock ordering requires that we hold the children's db_mutexes (by
	 * calling dbuf_find()) before holding the parent's db_rwlock. The lock
	 * order is imposed by dbuf_read's steps of "grab the lock to protect
	 * db_parent, get db_parent, hold db_parent's db_rwlock".
	 */
	dmu_buf_impl_t *children[DN_MAX_NBLKPTR];
	ASSERT3U(nblkptr, <=, DN_MAX_NBLKPTR);
	for (i = 0; i < nblkptr; i++) {
		children[i] =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);
	}
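	/*
	 * Each child returned by dbuf_find() above comes back with its
	 * db_mtx held; the loop further down releases it with mutex_exit()
	 * once the child's parent pointer has been updated, which is what
	 * satisfies the lock ordering described above.
	 */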
	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	if (dn->dn_dbuf != NULL)
		rw_enter(&dn->dn_dbuf->db_rwlock, RW_WRITER);
	rw_enter(&db->db_rwlock, RW_WRITER);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = children[i];

		if (child == NULL)
			continue;

		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);

		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	rw_exit(&db->db_rwlock);
	if (dn->dn_dbuf != NULL)
		rw_exit(&dn->dn_dbuf->db_rwlock);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, (u_longlong_t)dn->dn_object,
	    num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */
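		/*
		 * For example, a hole punched here in txg 100 keeps birth
		 * time 100, so an incremental send from a snapshot taken
		 * at or after txg 100 can see that the hole predates the
		 * increment and skip sending a free record for it.
		 */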
		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num, i, j, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level - 1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = dbuf_find_dirty_eq(child, txg);

		/* data_old better be zeroed */
		if (dr != NULL) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    list_is_empty(&child->db_dirty_records)) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

#ifdef ZFS_DEBUG
#define	FREE_VERIFY(db, start, end, tx) \
	free_verify(db, start, end, tx)
#else
#define	FREE_VERIFY(db, start, end, tx)
#endif
/*
 * We don't usually free the indirect blocks here. If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times. Therefore, we don't free any indirect
 * blocks in free_children(). If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready, which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion. In
 * addition, we know the case described above cannot occur, because the dnode
 * is being freed. Therefore, we free the indirect blocks immediately in that
 * case.
 */
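/*
 * free_indirects is B_TRUE only on the dnode-free path: dnode_sync()
 * sets dsfra_free_indirects to freeing_dnode and passes it down through
 * dnode_sync_free_range_impl() into free_children().
 */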
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	/*
	 * If we modify this indirect block, and we are not freeing the
	 * dnode (!free_indirects), then this indirect block needs to get
	 * written to disk by dbuf_write(). If it is dirty, we know it will
	 * be written (otherwise, we would have incorrect on-disk state
	 * because the space would be freed but still referenced by the BP
	 * in this indirect block). Therefore we VERIFY that it is
	 * dirty.
	 *
	 * Our VERIFY covers some cases that do not actually have to be
	 * dirty, but the open-context code happens to dirty. E.g. if the
	 * blocks we are freeing are all holes, because in that case, we
	 * are only freeing part of this indirect block, so it is an
	 * ancestor of the first or last block to be freed. The first and
	 * last L1 indirect blocks are always dirtied by dnode_free_range().
	 */
	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
	VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);
	dmu_buf_unlock_parent(db, dblt, FTAG);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);
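	/*
	 * As a concrete example: with the common 128K indirect block size
	 * (dn_indblkshift == 17) and 128-byte block pointers
	 * (SPA_BLKPTRSHIFT == 7), epbs is 10 and this block maps 1024
	 * children.  dbstart/dbend are the first and last child indices
	 * covered by this block, while start/end are the caller's L0
	 * block range converted (via shift) into child indices at this
	 * block's level and clamped to [dbstart, dbend] above.
	 */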
	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		rw_enter(&db->db_rwlock, RW_WRITER);
		free_blocks(dn, bp, end - start + 1, tx);
		rw_exit(&db->db_rwlock);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, free_indirects, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	if (free_indirects) {
		rw_enter(&db->db_rwlock, RW_WRITER);
		for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
			ASSERT(BP_IS_HOLE(bp));
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
		rw_exit(&db->db_rwlock);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}
/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}
	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);
			free_children(db, blkid, nblks, free_indirects, tx);
			dbuf_rele(db, FTAG);
		}
	}

	/*
	 * Do not truncate the maxblkid if we are performing a raw
	 * receive. The raw receive sets the maxblkid manually and
	 * must not be overridden. Usually, the last DRR_FREE record
	 * will be at the maxblkid, because the source system sets
	 * the maxblkid when truncating. However, if the last block
	 * was freed by overwriting with zeros and being compressed
	 * away to a hole, the source system will generate a DRR_FREE
	 * record while leaving the maxblkid after the end of that
	 * record. In this case we need to leave the maxblkid as
	 * indicated in the DRR_OBJECT record, so that it matches the
	 * source system, ensuring that the cryptographic hashes will
	 * match.
	 */
	if (trunc && !dn->dn_objset->os_raw_receive) {
		uint64_t off __maybe_unused;
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
	boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks,
	    dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}
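/*
 * dnode_sync() walks dn_free_ranges[txgoff] with dnode_sync_free_range()
 * as the callback (see below); dn_mtx is dropped around the call because
 * freeing the range may need to read indirect blocks and take other
 * locks, which should not happen while the dnode mutex is held.
 */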
/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t *db_marker;
	dmu_buf_impl_t *db, *db_next;

	db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    zfs_refcount_is_zero(&db->db_holds)) {
			db_marker->db_level = db->db_level;
			db_marker->db_blkid = db->db_blkid;
			db_marker->db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, db_marker, db,
			    AVL_BEFORE);

			/*
			 * We need to use the "marker" dbuf rather than
			 * simply getting the next dbuf, because
			 * dbuf_destroy() may actually remove multiple dbufs.
			 * It can call itself recursively on the parent dbuf,
			 * which may also be removed from dn_dbufs.  The code
			 * flow would look like:
			 *
			 * dbuf_destroy():
			 *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
			 *	if (!cacheable || pending_evict)
			 *	  dbuf_destroy()
			 */
			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
			avl_remove(&dn->dn_dbufs, db_marker);
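			/*
			 * The marker's DB_SEARCH state is what lets
			 * dbuf_compare() order it unambiguously against
			 * real dbufs with the same level and blkid while
			 * it sits in dn_dbufs.
			 */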
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	kmem_free(db_marker, sizeof (dmu_buf_impl_t));

	dnode_evict_bonus(dn);
}
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(list_head(&db->db_dirty_records) == dr);
		list_remove_head(&db->db_dirty_records);
		ASSERT(list_is_empty(&db->db_dirty_records));
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
	}
}
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;
	dn->dn_next_maxblkid[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
	dnode_free_interior_slots(dn);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	dn->dn_num_slots = 1;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}
/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn __maybe_unused = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}
	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}
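	/*
	 * Only the decision is latched here; the spill block itself is
	 * freed further down, once dn_mtx has been dropped, via
	 * free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx).
	 */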
	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		dsfra.dsfra_free_indirects = freeing_dnode;
		mutex_enter(&dn->dn_mtx);
		if (freeing_dnode) {
			ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
			    0, dn->dn_maxblkid + 1));
		}
		/*
		 * Because dnode_sync_free_range() must drop dn_mtx during its
		 * processing, using it as a callback to range_tree_vacate() is
		 * not safe. No other operations (besides destroy) are allowed
		 * once range_tree_vacate() has begun, and dropping dn_mtx
		 * would leave a window open for another thread to observe that
		 * invalid (and unsafe) state.
		 */
		range_tree_walk(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation[SPA_FEATURE_LARGE_DNODE] =
		    (void *)B_TRUE;
		mutex_exit(&ds->ds_lock);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	/*
	 * This must be done after dnode_sync_free_range()
	 * and dnode_increase_indirection(). See dnode_new_blkid()
	 * for an explanation of the high bit being set.
	 */
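	/*
	 * dn_next_maxblkid[] carries the DMU_NEXT_MAXBLKID_SET flag so that
	 * an explicitly-set maxblkid of 0 is still distinguishable from
	 * "not set in this txg"; the flag is stripped below before the
	 * value is copied into the on-disk dnode.
	 */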
	if (dn->dn_next_maxblkid[txgoff]) {
		mutex_enter(&dn->dn_mtx);
		dnp->dn_maxblkid =
		    dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET;
		dn->dn_next_maxblkid[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;

			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = 0; i < dnp->dn_nblkptr; i++) {
				if (i >= dn->dn_next_nblkptr[txgoff])
					ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
			}
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf. Additionally, the caller
	 * has already added a reference to the dnode because it's on the
	 * os_synced_dnodes list.
	 */
}