/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>

static int free_range_compar(const void *node1, const void *node2);

static kmem_cache_t *dnode_cache;

static dnode_phys_t dnode_phys_zero;

int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;

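/*
 * kmem cache constructor/destructor for dnode_t.  The constructor sets
 * up the embedded locks, condition variable, refcounts, and the per-txg
 * free-range AVL trees and dirty-record lists; the destructor tears the
 * same state back down.
 */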
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
	int i;
	dnode_t *dn = unused;
	bzero(dn, sizeof (dnode_t));

	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
	refcount_create(&dn->dn_holds);
	refcount_create(&dn->dn_tx_holds);

	for (i = 0; i < TXG_SIZE; i++) {
		avl_create(&dn->dn_ranges[i], free_range_compar,
		    sizeof (free_range_t),
		    offsetof(struct free_range, fr_node));
		list_create(&dn->dn_dirty_records[i],
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}

	list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	return (0);
}

/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
	int i;
	dnode_t *dn = unused;

	rw_destroy(&dn->dn_struct_rwlock);
	mutex_destroy(&dn->dn_mtx);
	mutex_destroy(&dn->dn_dbufs_mtx);
	cv_destroy(&dn->dn_notxholds);
	refcount_destroy(&dn->dn_holds);
	refcount_destroy(&dn->dn_tx_holds);

	for (i = 0; i < TXG_SIZE; i++) {
		avl_destroy(&dn->dn_ranges[i]);
		list_destroy(&dn->dn_dirty_records[i]);
	}

	list_destroy(&dn->dn_dbufs);
}

void
dnode_init(void)
{
	dnode_cache = kmem_cache_create("dnode_t",
	    sizeof (dnode_t),
	    0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
}

void
dnode_fini(void)
{
	kmem_cache_destroy(dnode_cache);
}

#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
	int drop_struct_lock = FALSE;

	ASSERT(dn->dn_phys);
	ASSERT(dn->dn_objset);

	ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES);

	if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
		return;

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
		int i;
		ASSERT3U(dn->dn_indblkshift, >=, 0);
		ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
		if (dn->dn_datablkshift) {
			ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
			ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
			ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
		}
		ASSERT3U(dn->dn_nlevels, <=, 30);
		ASSERT3U(dn->dn_type, <=, DMU_OT_NUMTYPES);
		ASSERT3U(dn->dn_nblkptr, >=, 1);
		ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
		ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		ASSERT3U(dn->dn_datablksz, ==,
		    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
		ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
		    dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		for (i = 0; i < TXG_SIZE; i++) {
			ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
		}
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE)
		ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || dn->dn_dbuf != NULL);
	if (dn->dn_dbuf != NULL) {
		ASSERT3P(dn->dn_phys, ==,
		    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
		    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
	}
	if (drop_struct_lock)
		rw_exit(&dn->dn_struct_rwlock);
}
#endif

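/*
 * Byteswap a single on-disk dnode: the fixed-width fields, then each
 * embedded block pointer, then (via the bonus type's byteswap op) the
 * bonus buffer that follows the last block pointer.
 */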
void
dnode_byteswap(dnode_phys_t *dnp)
{
	uint64_t *buf64 = (void*)&dnp->dn_blkptr;
	int i;

	if (dnp->dn_type == DMU_OT_NONE) {
		bzero(dnp, sizeof (dnode_phys_t));
		return;
	}

	dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
	dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
	dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
	dnp->dn_used = BSWAP_64(dnp->dn_used);

	/*
	 * dn_nblkptr is only one byte, so it's OK to read it in either
	 * byte order.  We can't read dn_bonuslen.
	 */
	ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
	ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
	for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
		buf64[i] = BSWAP_64(buf64[i]);

	/*
	 * OK to check dn_bonuslen for zero, because it won't matter if
	 * we have the wrong byte order.  This is necessary because the
	 * dnode dnode is smaller than a regular dnode.
	 */
	if (dnp->dn_bonuslen != 0) {
		/*
		 * Note that the bonus length calculated here may be
		 * longer than the actual bonus buffer.  This is because
		 * we always put the bonus buffer after the last block
		 * pointer (instead of packing it against the end of the
		 * dnode buffer).
		 */
		int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
		size_t len = DN_MAX_BONUSLEN - off;
		ASSERT3U(dnp->dn_bonustype, <, DMU_OT_NUMTYPES);
		dmu_ot[dnp->dn_bonustype].ot_byteswap(dnp->dn_bonus + off, len);
	}
}

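/*
 * Byteswap an entire buffer's worth of on-disk dnodes, one
 * dnode_phys_t at a time.
 */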
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
	dnode_phys_t *buf = vbuf;
	int i;

	ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
	ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);

	size >>= DNODE_SHIFT;
	for (i = 0; i < size; i++) {
		dnode_byteswap(buf);
		buf++;
	}
}

static int
free_range_compar(const void *node1, const void *node2)
{
	const free_range_t *rp1 = node1;
	const free_range_t *rp2 = node2;

	if (rp1->fr_blkid < rp2->fr_blkid)
		return (-1);
	else if (rp1->fr_blkid > rp2->fr_blkid)
		return (1);
	else return (0);
}

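/*
 * Change the length of an object's bonus buffer.  The dnode must
 * already be held; the new length is recorded per-txg in
 * dn_next_bonuslen so that the change is applied in syncing context.
 */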
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);

	dnode_setdirty(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
	    (dn->dn_nblkptr-1) * sizeof (blkptr_t));
	dn->dn_bonuslen = newsize;
	if (newsize == 0)
		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
	else
		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
	rw_exit(&dn->dn_struct_rwlock);
}

static void
dnode_setdblksz(dnode_t *dn, int size)
{
	ASSERT3U(P2PHASE(size, SPA_MINBLOCKSIZE), ==, 0);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
	    1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
	dn->dn_datablksz = size;
	dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
	dn->dn_datablkshift = ISP2(size) ? highbit(size - 1) : 0;
}

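/*
 * Construct an in-core dnode from its on-disk image and link it onto
 * the objset's list of dnodes.  'db' is the dbuf containing the
 * dnode_phys_t (NULL for the special dnodes opened via
 * dnode_special_open()).
 */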
static dnode_t *
dnode_create(objset_impl_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
    uint64_t object)
{
	dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
//	(void) dnode_cons(dn, NULL, 0);	/* XXX */

	dn->dn_objset = os;
	dn->dn_object = object;
	dn->dn_dbuf = db;
	dn->dn_phys = dnp;

	if (dnp->dn_datablkszsec)
		dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	dn->dn_indblkshift = dnp->dn_indblkshift;
	dn->dn_nlevels = dnp->dn_nlevels;
	dn->dn_type = dnp->dn_type;
	dn->dn_nblkptr = dnp->dn_nblkptr;
	dn->dn_checksum = dnp->dn_checksum;
	dn->dn_compress = dnp->dn_compress;
	dn->dn_bonustype = dnp->dn_bonustype;
	dn->dn_bonuslen = dnp->dn_bonuslen;
	dn->dn_maxblkid = dnp->dn_maxblkid;

	dmu_zfetch_init(&dn->dn_zfetch, dn);

	ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES);
	mutex_enter(&os->os_lock);
	list_insert_head(&os->os_dnodes, dn);
	mutex_exit(&os->os_lock);

	arc_space_consume(sizeof (dnode_t));
	return (dn);
}

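/*
 * Undo dnode_create(): unlink the dnode from the objset, evict any
 * bonus dbuf, and return the dnode_t to the kmem cache.  All holds
 * must already have been dropped.
 */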
static void
dnode_destroy(dnode_t *dn)
{
	objset_impl_t *os = dn->dn_objset;

#ifdef ZFS_DEBUG
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT(NULL == list_head(&dn->dn_dirty_records[i]));
		ASSERT(0 == avl_numnodes(&dn->dn_ranges[i]));
	}
	ASSERT(NULL == list_head(&dn->dn_dbufs));
#endif

	mutex_enter(&os->os_lock);
	list_remove(&os->os_dnodes, dn);
	mutex_exit(&os->os_lock);

	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}
	dmu_zfetch_rele(&dn->dn_zfetch);
	if (dn->dn_bonus) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	kmem_cache_free(dnode_cache, dn);
	arc_space_return(sizeof (dnode_t));
}

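/*
 * Allocate a new object: initialize the previously-free dnode with
 * the given type, block size, indirect block shift, and bonus buffer,
 * then mark it dirty in this transaction.
 */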
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i;

	if (blocksize == 0)
		blocksize = 1 << zfs_default_bs;
	else if (blocksize > SPA_MAXBLOCKSIZE)
		blocksize = SPA_MAXBLOCKSIZE;
	else
		blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);

	if (ibs == 0)
		ibs = zfs_default_ibs;

	ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);

	dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
	    dn->dn_object, tx->tx_txg, blocksize, ibs);

	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
	ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
	ASSERT(ot != DMU_OT_NONE);
	ASSERT3U(ot, <, DMU_OT_NUMTYPES);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT3U(bonustype, <, DMU_OT_NUMTYPES);
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT3U(dn->dn_maxblkid, ==, 0);
	ASSERT3U(dn->dn_allocated_txg, ==, 0);
	ASSERT3U(dn->dn_assigned_txg, ==, 0);
	ASSERT(refcount_is_zero(&dn->dn_tx_holds));
	ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT3U(dn->dn_next_nlevels[i], ==, 0);
		ASSERT3U(dn->dn_next_indblkshift[i], ==, 0);
		ASSERT3U(dn->dn_next_bonuslen[i], ==, 0);
		ASSERT3U(dn->dn_next_blksz[i], ==, 0);
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
		ASSERT3U(avl_numnodes(&dn->dn_ranges[i]), ==, 0);
	}

	dn->dn_type = ot;
	dnode_setdblksz(dn, blocksize);
	dn->dn_indblkshift = ibs;
	dn->dn_nlevels = 1;
	dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	dn->dn_dirtyctx = 0;

	dn->dn_free_txg = 0;
	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}

	dn->dn_allocated_txg = tx->tx_txg;

	dnode_setdirty(dn, tx);
	dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
	dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}

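/*
 * Reuse an existing object number for a new object: any old data is
 * freed, and the block size, type, and bonus buffer are changed in
 * place.
 */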
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i, old_nblkptr;
	dmu_buf_impl_t *db = NULL;

	ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(blocksize, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(blocksize % SPA_MINBLOCKSIZE, ==, 0);
	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
	ASSERT(tx->tx_txg != 0);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT3U(bonustype, <, DMU_OT_NUMTYPES);
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);

	for (i = 0; i < TXG_SIZE; i++)
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));

	/* clean up any unreferenced dbufs */
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX I should really have a generation number to tell if we
	 * need to do this...
	 */
	if (blocksize != dn->dn_datablksz ||
	    dn->dn_bonustype != bonustype || dn->dn_bonuslen != bonuslen) {
		/* free all old data */
		dnode_free_range(dn, 0, -1ULL, tx);
	}

	/* change blocksize */
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (blocksize != dn->dn_datablksz &&
	    (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
	    list_head(&dn->dn_dbufs) != NULL)) {
		db = dbuf_hold(dn, 0, FTAG);
		dbuf_new_size(db, blocksize, tx);
	}
	dnode_setdblksz(dn, blocksize);
	dnode_setdirty(dn, tx);
	dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
	rw_exit(&dn->dn_struct_rwlock);
	if (db)
		dbuf_rele(db, FTAG);

	/* change type */
	dn->dn_type = ot;

	/* change bonus size and type */
	mutex_enter(&dn->dn_mtx);
	old_nblkptr = dn->dn_nblkptr;
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);

	/* XXX - for now, we can't make nblkptr smaller */
	ASSERT3U(dn->dn_nblkptr, >=, old_nblkptr);

	/* fix up the bonus db_size if dn_nblkptr has changed */
	if (dn->dn_bonus && dn->dn_bonuslen != old_nblkptr) {
		dn->dn_bonus->db.db_size =
		    DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
	}

	dn->dn_allocated_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);
}

void
dnode_special_close(dnode_t *dn)
{
	/*
	 * Wait for final references to the dnode to clear.  This can
	 * only happen if the arc is asynchronously evicting state that
	 * has a hold on this dnode while we are trying to evict this
	 * dnode.
	 */
	while (refcount_count(&dn->dn_holds) > 0)
		delay(1);
	dnode_destroy(dn);
}

dnode_t *
dnode_special_open(objset_impl_t *os, dnode_phys_t *dnp, uint64_t object)
{
	dnode_t *dn = dnode_create(os, dnp, NULL, object);
	DNODE_VERIFY(dn);
	return (dn);
}

static void
dnode_buf_pageout(dmu_buf_t *db, void *arg)
{
	dnode_t **children_dnodes = arg;
	int i;
	int epb = db->db_size >> DNODE_SHIFT;

	for (i = 0; i < epb; i++) {
		dnode_t *dn = children_dnodes[i];
		int n;

		if (dn == NULL)
			continue;
#ifdef ZFS_DEBUG
		/*
		 * If there are holds on this dnode, then there should
		 * be holds on the dnode's containing dbuf as well; thus
		 * it wouldn't be eligible for eviction and this function
		 * would not have been called.
		 */
		ASSERT(refcount_is_zero(&dn->dn_holds));
		ASSERT(list_head(&dn->dn_dbufs) == NULL);
		ASSERT(refcount_is_zero(&dn->dn_tx_holds));

		for (n = 0; n < TXG_SIZE; n++)
			ASSERT(!list_link_active(&dn->dn_dirty_link[n]));
#endif
		children_dnodes[i] = NULL;
		dnode_destroy(dn);
	}
	kmem_free(children_dnodes, epb * sizeof (dnode_t *));
}

/*
 * errors:
 * EINVAL - invalid object number.
 * EIO - i/o error.
 * succeeds even for free dnodes.
 */
int
dnode_hold_impl(objset_impl_t *os, uint64_t object, int flag,
    void *tag, dnode_t **dnp)
{
	int epb, idx, err;
	int drop_struct_lock = FALSE;
	int type;
	uint64_t blk;
	dnode_t *mdn, *dn;
	dmu_buf_impl_t *db;
	dnode_t **children_dnodes;

	/*
	 * If you are holding the spa config lock as writer, you shouldn't
	 * be asking the DMU to do *anything*.
	 */
	ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0);

	if (object == 0 || object >= DN_MAX_OBJECT)
		return (EINVAL);

	mdn = os->os_meta_dnode;

	DNODE_VERIFY(mdn);

	if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
		rw_enter(&mdn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	blk = dbuf_whichblock(mdn, object * sizeof (dnode_phys_t));

	db = dbuf_hold(mdn, blk, FTAG);
	if (drop_struct_lock)
		rw_exit(&mdn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	if (err) {
		dbuf_rele(db, FTAG);
		return (err);
	}

	ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
	epb = db->db.db_size >> DNODE_SHIFT;

	idx = object & (epb-1);

	children_dnodes = dmu_buf_get_user(&db->db);
	if (children_dnodes == NULL) {
		dnode_t **winner;
		children_dnodes = kmem_zalloc(epb * sizeof (dnode_t *),
		    KM_SLEEP);
		if (winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
		    dnode_buf_pageout)) {
			kmem_free(children_dnodes, epb * sizeof (dnode_t *));
			children_dnodes = winner;
		}
	}

	if ((dn = children_dnodes[idx]) == NULL) {
		dnode_phys_t *dnp = (dnode_phys_t *)db->db.db_data+idx;
		dnode_t *winner;

		dn = dnode_create(os, dnp, db, object);
		winner = atomic_cas_ptr(&children_dnodes[idx], NULL, dn);
		if (winner != NULL) {
			dnode_destroy(dn);
			dn = winner;
		}
	}

	mutex_enter(&dn->dn_mtx);
	type = dn->dn_type;
	if (dn->dn_free_txg ||
	    ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
	    ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)) {
		mutex_exit(&dn->dn_mtx);
		dbuf_rele(db, FTAG);
		return (type == DMU_OT_NONE ? ENOENT : EEXIST);
	}
	mutex_exit(&dn->dn_mtx);

	if (refcount_add(&dn->dn_holds, tag) == 1)
		dbuf_add_ref(db, dn);

	DNODE_VERIFY(dn);
	ASSERT3P(dn->dn_dbuf, ==, db);
	ASSERT3U(dn->dn_object, ==, object);
	dbuf_rele(db, FTAG);

	*dnp = dn;
	return (0);
}

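/*
 * Typical hold/release usage (illustrative sketch only):
 *
 *	dnode_t *dn;
 *	int err = dnode_hold(os, object, FTAG, &dn);
 *	if (err)
 *		return (err);
 *	... use dn ...
 *	dnode_rele(dn, FTAG);
 */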
/*
 * Return held dnode if the object is allocated, NULL if not.
 */
int
dnode_hold(objset_impl_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
	return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
}

/*
 * Can only add a reference if there is already at least one
 * reference on the dnode.  Returns FALSE if unable to add a
 * new reference.
 */
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
	mutex_enter(&dn->dn_mtx);
	if (refcount_is_zero(&dn->dn_holds)) {
		mutex_exit(&dn->dn_mtx);
		return (FALSE);
	}
	VERIFY(1 < refcount_add(&dn->dn_holds, tag));
	mutex_exit(&dn->dn_mtx);
	return (TRUE);
}

void
dnode_rele(dnode_t *dn, void *tag)
{
	uint64_t refs;

	mutex_enter(&dn->dn_mtx);
	refs = refcount_remove(&dn->dn_holds, tag);
	mutex_exit(&dn->dn_mtx);
	/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
	if (refs == 0 && dn->dn_dbuf)
		dbuf_rele(dn->dn_dbuf, dn);
}

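/*
 * Mark the dnode dirty in the given transaction, putting it on either
 * the objset's per-txg dirty list or, if it is being freed, the free
 * list.  A "dirty hold" keeps the dnode alive until syncing context
 * processes it.
 */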
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	if (dn->dn_object == DMU_META_DNODE_OBJECT)
		return;

	DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
	mutex_enter(&dn->dn_mtx);
	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
	/* ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg); */
	mutex_exit(&dn->dn_mtx);
#endif

	mutex_enter(&os->os_lock);

	/*
	 * If we are already marked dirty, we're done.
	 */
	if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
		mutex_exit(&os->os_lock);
		return;
	}

	ASSERT(!refcount_is_zero(&dn->dn_holds) || list_head(&dn->dn_dbufs));
	ASSERT(dn->dn_datablksz != 0);
	ASSERT3U(dn->dn_next_bonuslen[txg&TXG_MASK], ==, 0);
	ASSERT3U(dn->dn_next_blksz[txg&TXG_MASK], ==, 0);

	dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
	    dn->dn_object, txg);

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
		list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
	} else {
		list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
	}

	mutex_exit(&os->os_lock);

	/*
	 * The dnode maintains a hold on its containing dbuf as
	 * long as there are holds on it.  Each instantiated child
	 * dbuf maintains a hold on the dnode.  When the last child
	 * drops its hold, the dnode will drop its hold on the
	 * containing dbuf.  We add a "dirty hold" here so that the
	 * dnode will hang around after we finish processing its
	 * children.
	 */
	VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));

	(void) dbuf_dirty(dn->dn_dbuf, tx);

	dsl_dataset_dirty(os->os_dsl_dataset, tx);
}

void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);

	/* we should be the only holder... hopefully */
	/* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
		mutex_exit(&dn->dn_mtx);
		return;
	}
	dn->dn_free_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);

	/*
	 * If the dnode is already dirty, it needs to be moved from
	 * the dirty list to the free list.
	 */
	mutex_enter(&dn->dn_objset->os_lock);
	if (list_link_active(&dn->dn_dirty_link[txgoff])) {
		list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
		list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
		mutex_exit(&dn->dn_objset->os_lock);
	} else {
		mutex_exit(&dn->dn_objset->os_lock);
		dnode_setdirty(dn, tx);
	}
}

/*
 * Try to change the block size for the indicated dnode.  This can only
 * succeed if there are no blocks allocated or dirty beyond first block
 */
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	int err;

	if (size == 0)
		size = SPA_MINBLOCKSIZE;
	if (size > SPA_MAXBLOCKSIZE)
		size = SPA_MAXBLOCKSIZE;
	else
		size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);

	if (ibs == dn->dn_indblkshift)
		ibs = 0;

	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
		return (0);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* Check for any allocated blocks beyond the first */
	if (dn->dn_phys->dn_maxblkid != 0)
		goto fail;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);

		if (db->db_blkid != 0 && db->db_blkid != DB_BONUS_BLKID) {
			mutex_exit(&dn->dn_dbufs_mtx);
			goto fail;
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	if (ibs && dn->dn_nlevels != 1)
		goto fail;

	/* resize the old block */
	err = dbuf_hold_impl(dn, 0, 0, TRUE, FTAG, &db);
	if (err == 0)
		dbuf_new_size(db, size, tx);
	else if (err != ENOENT)
		goto fail;

	dnode_setdblksz(dn, size);
	dnode_setdirty(dn, tx);
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
	if (ibs) {
		dn->dn_indblkshift = ibs;
		dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
	}
	/* rele after we have fixed the blocksize in the dnode */
	if (db)
		dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
	return (0);

fail:
	rw_exit(&dn->dn_struct_rwlock);
	return (ENOTSUP);
}

/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
{
	uint64_t txgoff = tx->tx_txg & TXG_MASK;
	int epbs, new_nlevels;
	uint64_t sz;

	ASSERT(blkid != DB_BONUS_BLKID);

	ASSERT(have_read ?
	    RW_READ_HELD(&dn->dn_struct_rwlock) :
	    RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * if we have a read-lock, check to see if we need to do any work
	 * before upgrading to a write-lock.
	 */
	if (have_read) {
		if (blkid <= dn->dn_maxblkid)
			return;

		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
			rw_exit(&dn->dn_struct_rwlock);
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		}
	}

	if (blkid <= dn->dn_maxblkid)
		goto out;

	dn->dn_maxblkid = blkid;

	/*
	 * Compute the number of levels necessary to support the new maxblkid.
	 */
	new_nlevels = 1;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (sz = dn->dn_nblkptr;
	    sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
		new_nlevels++;

	if (new_nlevels > dn->dn_nlevels) {
		int old_nlevels = dn->dn_nlevels;
		dmu_buf_impl_t *db;
		list_t *list;
		dbuf_dirty_record_t *new, *dr, *dr_next;

		dn->dn_nlevels = new_nlevels;

		ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
		dn->dn_next_nlevels[txgoff] = new_nlevels;

		/* dirty the left indirects */
		db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
		new = dbuf_dirty(db, tx);
		dbuf_rele(db, FTAG);

		/* transfer the dirty records to the new indirect */
		mutex_enter(&dn->dn_mtx);
		mutex_enter(&new->dt.di.dr_mtx);
		list = &dn->dn_dirty_records[txgoff];
		for (dr = list_head(list); dr; dr = dr_next) {
			dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
			if (dr->dr_dbuf->db_level != new_nlevels-1 &&
			    dr->dr_dbuf->db_blkid != DB_BONUS_BLKID) {
				ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
				list_remove(&dn->dn_dirty_records[txgoff], dr);
				list_insert_tail(&new->dt.di.dr_children, dr);
				dr->dr_parent = new;
			}
		}
		mutex_exit(&new->dt.di.dr_mtx);
		mutex_exit(&dn->dn_mtx);
	}

out:
	if (have_read)
		rw_downgrade(&dn->dn_struct_rwlock);
}

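/*
 * Remove blkid..blkid+nblks-1 from this txg's pending free-range tree,
 * trimming, splitting, or deleting any overlapping ranges.  Caller
 * must hold dn_mtx.
 */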
void
dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];
	avl_index_t where;
	free_range_t *rp;
	free_range_t rp_tofind;
	uint64_t endblk = blkid + nblks;

	ASSERT(MUTEX_HELD(&dn->dn_mtx));
	ASSERT(nblks <= UINT64_MAX - blkid); /* no overflow */

	dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
	    blkid, nblks, tx->tx_txg);
	rp_tofind.fr_blkid = blkid;
	rp = avl_find(tree, &rp_tofind, &where);
	if (rp == NULL)
		rp = avl_nearest(tree, where, AVL_BEFORE);
	if (rp == NULL)
		rp = avl_nearest(tree, where, AVL_AFTER);

	while (rp && (rp->fr_blkid <= blkid + nblks)) {
		uint64_t fr_endblk = rp->fr_blkid + rp->fr_nblks;
		free_range_t *nrp = AVL_NEXT(tree, rp);

		if (blkid <= rp->fr_blkid && endblk >= fr_endblk) {
			/* clear this entire range */
			avl_remove(tree, rp);
			kmem_free(rp, sizeof (free_range_t));
		} else if (blkid <= rp->fr_blkid &&
		    endblk > rp->fr_blkid && endblk < fr_endblk) {
			/* clear the beginning of this range */
			rp->fr_blkid = endblk;
			rp->fr_nblks = fr_endblk - endblk;
		} else if (blkid > rp->fr_blkid && blkid < fr_endblk &&
		    endblk >= fr_endblk) {
			/* clear the end of this range */
			rp->fr_nblks = blkid - rp->fr_blkid;
		} else if (blkid > rp->fr_blkid && endblk < fr_endblk) {
			/* clear a chunk out of this range */
			free_range_t *new_rp =
			    kmem_alloc(sizeof (free_range_t), KM_SLEEP);

			new_rp->fr_blkid = endblk;
			new_rp->fr_nblks = fr_endblk - endblk;
			avl_insert_here(tree, new_rp, rp, AVL_AFTER);
			rp->fr_nblks = blkid - rp->fr_blkid;
		}
		/* there may be no overlap */
		rp = nrp;
	}
}

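/*
 * Free the given range of an object's data.  Partial blocks at either
 * end of the range are zeroed in place; whole blocks are recorded in
 * the per-txg free-range tree and actually freed in syncing context.
 * A length of -1ULL means "free to the end of the object".
 */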
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	uint64_t blkoff, blkid, nblks;
	int blksz, blkshift, head, tail;
	int trunc = FALSE;
	int epbs;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	blksz = dn->dn_datablksz;
	blkshift = dn->dn_datablkshift;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	if (len == -1ULL) {
		len = UINT64_MAX - off;
		trunc = TRUE;
	}

	/*
	 * First, block align the region to free:
	 */
	if (ISP2(blksz)) {
		head = P2NPHASE(off, blksz);
		blkoff = P2PHASE(off, blksz);
		if ((off >> blkshift) > dn->dn_maxblkid)
			goto out;
	} else {
		ASSERT(dn->dn_maxblkid == 0);
		if (off == 0 && len >= blksz) {
			/* Freeing the whole block; fast-track this request */
			blkid = 0;
			nblks = 1;
			goto done;
		} else if (off >= blksz) {
			/* Freeing past end-of-data */
			goto out;
		} else {
			/* Freeing part of the block. */
			head = blksz - off;
			ASSERT3U(head, >, 0);
		}
		blkoff = off;
	}
	/* zero out any partial block data at the start of the range */
	if (head) {
		ASSERT3U(blkoff + head, ==, blksz);
		if (len < head)
			head = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off), TRUE,
		    FTAG, &db) == 0) {
			caddr_t data;

			/* don't dirty if it isn't on disk and isn't dirty */
			if (db->db_last_dirty ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				data = db->db.db_data;
				bzero(data + blkoff, head);
			}
			dbuf_rele(db, FTAG);
		}
		off += head;
		len -= head;
	}

	/* If the range was less than one block, we're done */
	if (len == 0)
		goto out;

	/* If the remaining range is past end of file, we're done */
	if ((off >> blkshift) > dn->dn_maxblkid)
		goto out;

	ASSERT(ISP2(blksz));
	if (trunc)
		tail = 0;
	else
		tail = P2PHASE(len, blksz);

	ASSERT3U(P2PHASE(off, blksz), ==, 0);
	/* zero out any partial block data at the end of the range */
	if (tail) {
		if (len < tail)
			tail = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off+len),
		    TRUE, FTAG, &db) == 0) {
			/* don't dirty if not on disk and not dirty */
			if (db->db_last_dirty ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				bzero(db->db.db_data, tail);
			}
			dbuf_rele(db, FTAG);
		}
		len -= tail;
	}

	/* If the range did not include a full block, we are done */
	if (len == 0)
		goto out;

	ASSERT(IS_P2ALIGNED(off, blksz));
	ASSERT(trunc || IS_P2ALIGNED(len, blksz));
	blkid = off >> blkshift;
	nblks = len >> blkshift;
	if (trunc)
		nblks += 1;

	/*
	 * Read in and mark all the level-1 indirects dirty,
	 * so that they will stay in memory until syncing phase.
	 * Always dirty the first and last indirect to make sure
	 * we dirty all the partial indirects.
	 */
	if (dn->dn_nlevels > 1) {
		uint64_t i, first, last;
		int shift = epbs + dn->dn_datablkshift;

		first = blkid >> epbs;
		if (db = dbuf_hold_level(dn, 1, first, FTAG)) {
			dbuf_will_dirty(db, tx);
			dbuf_rele(db, FTAG);
		}
		if (trunc)
			last = dn->dn_maxblkid >> epbs;
		else
			last = (blkid + nblks - 1) >> epbs;
		if (last > first && (db = dbuf_hold_level(dn, 1, last, FTAG))) {
			dbuf_will_dirty(db, tx);
			dbuf_rele(db, FTAG);
		}
		for (i = first + 1; i < last; i++) {
			uint64_t ibyte = i << shift;
			int err;

			err = dnode_next_offset(dn,
			    DNODE_FIND_HAVELOCK, &ibyte, 1, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i >= last)
				break;
			ASSERT(err == 0);
			db = dbuf_hold_level(dn, 1, i, FTAG);
			if (db) {
				dbuf_will_dirty(db, tx);
				dbuf_rele(db, FTAG);
			}
		}
	}
done:
	/*
	 * Add this range to the dnode range list.
	 * We will finish up this free operation in the syncing phase.
	 */
	mutex_enter(&dn->dn_mtx);
	dnode_clear_range(dn, blkid, nblks, tx);
	{
		free_range_t *rp, *found;
		avl_index_t where;
		avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];

		/* Add new range to dn_ranges */
		rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
		rp->fr_blkid = blkid;
		rp->fr_nblks = nblks;
		found = avl_find(tree, rp, &where);
		ASSERT(found == NULL);
		avl_insert(tree, rp, where);
		dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
		    blkid, nblks, tx->tx_txg);
	}
	mutex_exit(&dn->dn_mtx);

	dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
	dnode_setdirty(dn, tx);
out:
	if (trunc && dn->dn_maxblkid >= (off >> blkshift))
		dn->dn_maxblkid = (off >> blkshift ? (off >> blkshift) - 1 : 0);

	rw_exit(&dn->dn_struct_rwlock);
}

/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
	free_range_t range_tofind;
	void *dp = spa_get_dsl(dn->dn_objset->os_spa);
	int i;

	if (blkid == DB_BONUS_BLKID)
		return (FALSE);

	/*
	 * If we're in the process of opening the pool, dp will not be
	 * set yet, but there shouldn't be anything dirty.
	 */
	if (dp == NULL)
		return (FALSE);

	if (dn->dn_free_txg)
		return (TRUE);

	/*
	 * If dn_datablkshift is not set, then there's only a single
	 * block, in which case there will never be a free range so it
	 * won't matter.
	 */
	range_tofind.fr_blkid = blkid;
	mutex_enter(&dn->dn_mtx);
	for (i = 0; i < TXG_SIZE; i++) {
		free_range_t *range_found;
		avl_index_t idx;

		range_found = avl_find(&dn->dn_ranges[i], &range_tofind, &idx);
		if (range_found) {
			ASSERT(range_found->fr_nblks > 0);
			break;
		}
		range_found = avl_nearest(&dn->dn_ranges[i], idx, AVL_BEFORE);
		if (range_found &&
		    range_found->fr_blkid + range_found->fr_nblks > blkid)
			break;
	}
	mutex_exit(&dn->dn_mtx);
	return (i < TXG_SIZE);
}

/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
	uint64_t space;
	dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
	    dn, dn->dn_phys,
	    (u_longlong_t)dn->dn_phys->dn_used,
	    (longlong_t)delta);

	mutex_enter(&dn->dn_mtx);
	space = DN_USED_BYTES(dn->dn_phys);
	if (delta > 0) {
		ASSERT3U(space + delta, >=, space); /* no overflow */
	} else {
		ASSERT3U(space, >=, -delta); /* no underflow */
	}
	space += delta;
	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
		ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
		ASSERT3U(P2PHASE(space, 1<<DEV_BSHIFT), ==, 0);
		dn->dn_phys->dn_used = space >> DEV_BSHIFT;
	} else {
		dn->dn_phys->dn_used = space;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
	}
	mutex_exit(&dn->dn_mtx);
}

/*
 * Call when we think we're going to write/free space in open context.
 * Be conservative (ie. OK to write less than this or free more than
 * this, but don't write more or free less).
 */
void
dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;

	if (space > 0)
		space = spa_get_asize(os->os_spa, space);

	if (ds)
		dsl_dir_willuse_space(ds->ds_dir, space, tx);

	dmu_tx_willuse_space(tx, space);
}

static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
    int lvl, uint64_t blkfill, uint64_t txg)
{
	dmu_buf_impl_t *db = NULL;
	void *data = NULL;
	uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t epb = 1ULL << epbs;
	uint64_t minfill, maxfill;
	boolean_t hole;
	int i, inc, error, span;

	dprintf("probing object %llu offset %llx level %d of %u\n",
	    dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);

	hole = flags & DNODE_FIND_HOLE;
	inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
	ASSERT(txg == 0 || !hole);

	if (lvl == dn->dn_phys->dn_nlevels) {
		error = 0;
		epb = dn->dn_phys->dn_nblkptr;
		data = dn->dn_phys->dn_blkptr;
	} else {
		uint64_t blkid = dbuf_whichblock(dn, *offset) >> (epbs * lvl);
		error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FTAG, &db);
		if (error) {
			if (error != ENOENT)
				return (error);
			if (hole)
				return (0);
			/*
			 * This can only happen when we are searching up
			 * the block tree for data.  We don't really need to
			 * adjust the offset, as we will just end up looking
			 * at the pointer to this block in its parent, and it's
			 * going to be unallocated, so we will skip over it.
			 */
			return (ESRCH);
		}
		error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
		if (error) {
			dbuf_rele(db, FTAG);
			return (error);
		}
		data = db->db.db_data;
	}

	if (db && txg &&
	    (db->db_blkptr == NULL || db->db_blkptr->blk_birth <= txg)) {
		/*
		 * This can only happen when we are searching up the tree
		 * and these conditions mean that we need to keep climbing.
		 */
		error = ESRCH;
	} else if (lvl == 0) {
		dnode_phys_t *dnp = data;
		span = DNODE_SHIFT;
		ASSERT(dn->dn_type == DMU_OT_DNODE);

		for (i = (*offset >> span) & (blkfill - 1);
		    i >= 0 && i < blkfill; i += inc) {
			boolean_t newcontents = B_TRUE;
			if (txg) {
				int j;
				newcontents = B_FALSE;
				for (j = 0; j < dnp[i].dn_nblkptr; j++) {
					if (dnp[i].dn_blkptr[j].blk_birth > txg)
						newcontents = B_TRUE;
				}
			}
			if (!dnp[i].dn_type == hole && newcontents)
				break;
			*offset += (1ULL << span) * inc;
		}
		if (i < 0 || i == blkfill)
			error = ESRCH;
	} else {
		blkptr_t *bp = data;
		span = (lvl - 1) * epbs + dn->dn_datablkshift;
		minfill = 0;
		maxfill = blkfill << ((lvl - 1) * epbs);

		if (hole)
			maxfill--;
		else
			minfill++;

		for (i = (*offset >> span) & ((1ULL << epbs) - 1);
		    i >= 0 && i < epb; i += inc) {
			if (bp[i].blk_fill >= minfill &&
			    bp[i].blk_fill <= maxfill &&
			    (hole || bp[i].blk_birth > txg))
				break;
			if (inc < 0 && *offset < (1ULL << span))
				*offset = 0;
			else
				*offset += (1ULL << span) * inc;
		}
		if (i < 0 || i == epb)
			error = ESRCH;
	}

	if (db)
		dbuf_rele(db, FTAG);

	return (error);
}

/*
 * Find the next hole, data, or sparse region at or after *offset.
 * The value 'blkfill' tells us how many items we expect to find
 * in an L0 data block; this value is 1 for normal objects,
 * DNODES_PER_BLOCK for the meta dnode, and some fraction of
 * DNODES_PER_BLOCK when searching for sparse regions thereof.
 *
 * Examples:
 *
 * dnode_next_offset(dn, flags, offset, 1, 1, 0);
 *	Finds the next/previous hole/data in a file.
 *	Used in dmu_offset_next().
 *
 * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
 *	Finds the next free/allocated dnode in an objset's meta-dnode.
 *	Only finds objects that have new contents since txg (ie.
 *	bonus buffer changes and content removal are ignored).
 *	Used in dmu_object_next().
 *
 * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
 *	Finds the next L2 meta-dnode bp that's at most 1/4 full.
 *	Used in dmu_object_alloc().
 */
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
    int minlvl, uint64_t blkfill, uint64_t txg)
{
	uint64_t initial_offset = *offset;
	int lvl, maxlvl;
	int error = 0;

	if (!(flags & DNODE_FIND_HAVELOCK))
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	if (dn->dn_phys->dn_nlevels == 0) {
		error = ESRCH;
		goto out;
	}

	if (dn->dn_datablkshift == 0) {
		if (*offset < dn->dn_datablksz) {
			if (flags & DNODE_FIND_HOLE)
				*offset = dn->dn_datablksz;
		} else {
			error = ESRCH;
		}
		goto out;
	}

	maxlvl = dn->dn_phys->dn_nlevels;

	for (lvl = minlvl; lvl <= maxlvl; lvl++) {
		error = dnode_next_offset_level(dn,
		    flags, offset, lvl, blkfill, txg);
		if (error != ESRCH)
			break;
	}

	while (error == 0 && --lvl >= minlvl) {
		error = dnode_next_offset_level(dn,
		    flags, offset, lvl, blkfill, txg);
	}

	if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
	    initial_offset < *offset : initial_offset > *offset))
		error = ESRCH;
out:
	if (!(flags & DNODE_FIND_HAVELOCK))
		rw_exit(&dn->dn_struct_rwlock);

	return (error);
}