/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
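
/*
 * When dn_next_nlevels[txg] records that this dnode gained an indirection
 * level in the txg being synced, migrate the block pointers embedded in
 * the dnode into a newly created top-level indirect block and repoint any
 * cached child dbufs at that block.
 */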
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
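
/*
 * Free the given array of block pointers and subtract the reclaimed space
 * from the dnode's space accounting.  When the hole_birth feature is
 * active, the logical size, type, level, and birth txg of each freed BP
 * are preserved in the resulting hole so that zfs send can tell when the
 * hole was punched.
 */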
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}
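
/*
 * Debug-only check used while freeing a range: verify that the level-0
 * children covered by [start, end] under this level-1 indirect block have
 * had their data zeroed, unless they are dirty again in a later txg.
 */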
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, j, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}

#define	FREE_VERIFY(db, start, end, tx) \
	free_verify(db, start, end, tx)
#else
#define	FREE_VERIFY(db, start, end, tx)
#endif
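
/*
 * Free the range [blkid, blkid + nblks) as it appears beneath this
 * indirect block, recursing into lower-level indirect blocks as needed.
 * If every block pointer in this indirect block ends up as a hole, zero
 * the block's contents and free the indirect block itself as well.
 */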
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourself too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/*
		 * We only found holes. Grab the rwlock to prevent
		 * anybody from reading the blocks we're about to
		 * zero out.
		 */
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		bzero(db->db.db_data, db->db.db_size);
		rw_exit(&dn->dn_struct_rwlock);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t db_marker;
	dmu_buf_impl_t *db, *db_next;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    refcount_is_zero(&db->db_holds)) {
			db_marker.db_level = db->db_level;
			db_marker.db_blkid = db->db_blkid;
			db_marker.db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, &db_marker, db,
			    AVL_BEFORE);

			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
			avl_remove(&dn->dn_dbufs, &db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	dnode_evict_bonus(dn);
}
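
/*
 * Evict the dnode's bonus dbuf if it is no longer held; otherwise mark it
 * for eviction once the last hold is released.
 */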
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}
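
/*
 * Discard every dirty record on the list without writing it out, recursing
 * through the dirty children of indirect dbufs.  Used when the dnode is
 * being freed, so none of its pending changes need to reach disk.
 */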
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while (dr = list_head(list)) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}
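
/*
 * Called from dnode_sync() when the dnode is being freed in this txg:
 * throw away its remaining dirty state, evict its dbufs, and reset the
 * in-core and on-disk dnode to the unallocated state.
 */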
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}
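
	/*
	 * Apply the per-txg "next" values recorded in open context (block
	 * size, bonus length and type, spill-block removal, indirect block
	 * shift, checksum, and compression) to the on-disk dnode under
	 * dn_mtx.
	 */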
	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}
	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    range_tree_space(dn->dn_free_ranges[txgoff]) != 0);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);
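
	/*
	 * Free the spill block (if it was flagged for removal above) now
	 * that dn_mtx has been dropped, then clear the spill flag under
	 * dn_mtx again.
	 */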
	if (kill_spill) {
		free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}