[netbsd-mini2440.git] external/cddl/osnet/dist/uts/common/fs/zfs/dnode_sync.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
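/*
 * Called from dnode_sync() when dn_next_nlevels[] records a pending
 * increase in the dnode's indirection.  A new top-level indirect block
 * is created, the block pointers embedded in the dnode are copied into
 * it, and any cached child dbufs are re-parented to the new indirect
 * buffer before the dnode's own blkptrs are zeroed.
 */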
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
		ASSERT3P(child->db_dnode, ==, dn);
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
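/*
 * Free a contiguous run of "num" block pointers on behalf of "dn",
 * charging the freed space back against the dnode and zeroing each
 * non-hole bp.  Returns the number of blocks actually freed.
 */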
static int
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;
	int i, blocks_freed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, dn->dn_zio, tx);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
		bzero(bp, sizeof (blkptr_t));
		blocks_freed += 1;
	}
	dnode_diduse_space(dn, -bytesfreed);
	return (blocks_freed);
}
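/*
 * Debug-only sanity check, reached via the FREE_VERIFY() call in
 * free_children(): every level-0 child under the range being freed
 * should contain nothing but zeros, both in its dirty record for this
 * txg and in its in-core db_data, unless it has been re-dirtied in a
 * later txg.
 */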
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1<<db->db_dnode->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(db->db_dnode, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&db->db_dnode->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
}
#endif
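/*
 * free_children() recursively descends an indirect block, freeing the
 * portion of [blkid, blkid + nblks) that it covers.  It returns the
 * sentinel ALL when every pointer in the block was freed (so the caller
 * may free the indirect block itself), otherwise the count of data
 * blocks freed below it.
 */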
#define	ALL -1

static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
	dnode_t *dn = db->db_dnode;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift, err;
	int all = TRUE;
	int blocks_freed = 0;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if we didn't get a dirty hold (because this block had just
	 *	 finished being written -- and so had no holds), and then this
	 *	 block got evicted before we got here.
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	arc_release(db->db_buf, db);
	bp = (blkptr_t *)db->db.db_data;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
		all = FALSE;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;
	else if (all)
		all = trunc;
	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		blocks_freed = free_blocks(dn, bp, end-start+1, tx);
		arc_buf_freeze(db->db_buf);
		ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
		return (all ? ALL : blocks_freed);
	}

	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(subdb, blkid, nblks, trunc, tx) == ALL) {
			ASSERT3P(subdb->db_blkptr, ==, bp);
			blocks_freed += free_blocks(dn, bp, 1, tx);
		} else {
			all = FALSE;
		}
		dbuf_rele(subdb, FTAG);
	}
	arc_buf_freeze(db->db_buf);
#ifdef ZFS_DEBUG
	bp -= (end-start)+1;
	for (i = start; i <= end; i++, bp++) {
		if (i == start && blkid != 0)
			continue;
		else if (i == end && !trunc)
			continue;
		ASSERT3U(bp->blk_birth, ==, 0);
	}
#endif
	ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
	return (all ? ALL : blocks_freed);
}
/*
 * free_range: Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	dmu_buf_impl_t *db;
	int trunc, start, end, shift, i, err;
	int dnlevel = dn->dn_phys->dn_nlevels;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
	if (trunc)
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		(void) free_blocks(dn, bp + blkid, nblks, tx);
		if (trunc) {
			uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
			    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
			ASSERT(off < dn->dn_phys->dn_maxblkid ||
			    dn->dn_phys->dn_maxblkid == 0 ||
			    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
		}
		return;
	}

	shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
	start = blkid >> shift;
	ASSERT(start < dn->dn_phys->dn_nblkptr);
	end = (blkid + nblks - 1) >> shift;
	bp += start;
	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(db, blkid, nblks, trunc, tx) == ALL) {
			ASSERT3P(db->db_blkptr, ==, bp);
			(void) free_blocks(dn, bp, 1, tx);
		}
		dbuf_rele(db, FTAG);
	}
	if (trunc) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
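/*
 * The scan below inserts a stack-allocated marker at the tail of
 * dn_dbufs and rotates each dbuf past it: dbufs with no holds are
 * cleared immediately, while dbufs found in DB_EVICTING state force
 * another pass after a short delay so the competing evicter can make
 * progress.  The bonus buffer is handled separately at the end.
 */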
void
dnode_evict_dbufs(dnode_t *dn)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
		list_insert_tail(&dn->dn_dbufs, &marker);
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);
			ASSERT3P(db->db_dnode, ==, dn);

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
}
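/*
 * Discard the dirty records on "list" without writing them out,
 * releasing the hold each record has on its dbuf.  Indirect records
 * recurse into their children first.  Used when the dnode itself is
 * being freed, so none of its pending data needs to reach disk.
 */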
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while (dr = list_head(list)) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DB_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
			mutex_exit(&db->db_mtx);
		} else {
			mutex_exit(&db->db_mtx);
			mutex_destroy(&dr->dt.di.dr_mtx);
			dnode_undirty_dbufs(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele(db, (void *)(uintptr_t)txg);
	}
}
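/*
 * Final syncing-context teardown of a freed dnode: drop its dirty
 * state, evict its dbufs, clear the on-disk dnode_phys_t, and release
 * the hold that was keeping the dnode in memory.
 */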
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dbuf_will_dirty(dn->dn_dbuf, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}
/*
 * Write out the dnode's dirty buffers.
 *
 * NOTE: The dnode is kept in memory by being dirty.  Once the
 * dirty bit is cleared, it may be evicted.  Beware of this!
 */
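/*
 * The work below is ordered as: apply pending per-txg changes
 * (allocation, block size, bonus length, indirect block shift),
 * process the freed ranges, handle a pending dnode_free(), grow the
 * indirection level if needed, and finally push the dirty dbufs out
 * via dbuf_sync_list().
 */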
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	free_range_t *rp;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			/* XXX shouldn't the phys already be zeroed? */
			bzero(dnp, DNODE_CORE_SIZE);
			dnp->dn_nlevels = 1;
		}

		if (dn->dn_nblkptr > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_nblkptr - dnp->dn_nblkptr));
		}
		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
		dnp->dn_nblkptr = dn->dn_nblkptr;
	}

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_blksz[txgoff]) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff]) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff]) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	/* process all the "freed" ranges in the file */
	while (rp = avl_last(&dn->dn_ranges[txgoff])) {
		dnode_sync_free_range(dn, rp->fr_blkid, rp->fr_nblks, tx);
		/* grab the mutex so we don't race with dnode_block_freed() */
		mutex_enter(&dn->dn_mtx);
		avl_remove(&dn->dn_ranges[txgoff], rp);
		mutex_exit(&dn->dn_mtx);
		kmem_free(rp, sizeof (free_range_t));
	}

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	dbuf_sync_list(list, tx);

	if (dn->dn_object != DMU_META_DNODE_OBJECT) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}