/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{ byteswap_uint8_array,		TRUE,	"unallocated" },
	{ zap_byteswap,			TRUE,	"object directory" },
	{ byteswap_uint64_array,	TRUE,	"object array" },
	{ byteswap_uint8_array,		TRUE,	"packed nvlist" },
	{ byteswap_uint64_array,	TRUE,	"packed nvlist size" },
	{ byteswap_uint64_array,	TRUE,	"bplist" },
	{ byteswap_uint64_array,	TRUE,	"bplist header" },
	{ byteswap_uint64_array,	TRUE,	"SPA space map header" },
	{ byteswap_uint64_array,	TRUE,	"SPA space map" },
	{ byteswap_uint64_array,	TRUE,	"ZIL intent log" },
	{ dnode_buf_byteswap,		TRUE,	"DMU dnode" },
	{ dmu_objset_byteswap,		TRUE,	"DMU objset" },
	{ byteswap_uint64_array,	TRUE,	"DSL directory" },
	{ zap_byteswap,			TRUE,	"DSL directory child map" },
	{ zap_byteswap,			TRUE,	"DSL dataset snap map" },
	{ zap_byteswap,			TRUE,	"DSL props" },
	{ byteswap_uint64_array,	TRUE,	"DSL dataset" },
	{ zfs_znode_byteswap,		TRUE,	"ZFS znode" },
	{ zfs_oldacl_byteswap,		TRUE,	"ZFS V0 ACL" },
	{ byteswap_uint8_array,		FALSE,	"ZFS plain file" },
	{ zap_byteswap,			TRUE,	"ZFS directory" },
	{ zap_byteswap,			TRUE,	"ZFS master node" },
	{ zap_byteswap,			TRUE,	"ZFS delete queue" },
	{ byteswap_uint8_array,		FALSE,	"zvol object" },
	{ zap_byteswap,			TRUE,	"zvol prop" },
	{ byteswap_uint8_array,		FALSE,	"other uint8[]" },
	{ byteswap_uint64_array,	FALSE,	"other uint64[]" },
	{ zap_byteswap,			TRUE,	"other ZAP" },
	{ zap_byteswap,			TRUE,	"persistent error log" },
	{ byteswap_uint8_array,		TRUE,	"SPA history" },
	{ byteswap_uint64_array,	TRUE,	"SPA history offsets" },
	{ zap_byteswap,			TRUE,	"Pool properties" },
	{ zap_byteswap,			TRUE,	"DSL permissions" },
	{ zfs_acl_byteswap,		TRUE,	"ZFS ACL" },
	{ byteswap_uint8_array,		TRUE,	"ZFS SYSACL" },
	{ byteswap_uint8_array,		TRUE,	"FUID table" },
	{ byteswap_uint64_array,	TRUE,	"FUID table size" },
	{ zap_byteswap,			TRUE,	"DSL dataset next clones" },
	{ zap_byteswap,			TRUE,	"scrub work queue" },
};
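/*
 * Added note (illustrative, not part of the original source): dmu_ot is
 * indexed by DMU object type; each entry names the byteswap routine
 * (ot_byteswap), whether the type holds metadata (ot_metadata), and a
 * human-readable name (ot_name).  A consumer might dispatch on it roughly
 * like this hypothetical helper:
 *
 *	void
 *	example_byteswap(dmu_object_type_t type, void *buf, size_t size)
 *	{
 *		ASSERT(type < DMU_OT_NUMTYPES);
 *		dmu_ot[type].ot_byteswap(buf, size);
 *	}
 */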
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
	err = dnode_hold(os->os, object, FTAG, &dn);
	blkid = dbuf_whichblock(dn, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	dnode_rele(dn, FTAG);

	return (DN_MAX_BONUSLEN);
dmu_set_bonus(dmu_buf_t *db, int newsize, dmu_tx_t *tx)
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

	if (dn->dn_bonus != (dmu_buf_impl_t *)db)
	if (newsize < 0 || newsize > db->db_size)
	dnode_setbonuslen(dn, newsize, tx);
/*
 * returns ENOENT, EIO, or 0.
 */
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
	error = dnode_hold(os->os, object, FTAG, &dn);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	rw_exit(&dn->dn_struct_rwlock);

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1)
		VERIFY(dnode_add_ref(dn, db));
	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED));
/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
	dsl_pool_t *dp = NULL;
	uint64_t blkid, nblks, i;

	ASSERT(length <= DMU_MAX_ACCESS);

	flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT;
	if (length > zfetch_array_rd_sz)
		flags |= DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
		    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);

	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	if (dn->dn_objset->os_dsl_dataset)
		dp = dn->dn_objset->os_dsl_dataset->ds_dir->dd_pool;
	if (dp && dsl_pool_sync_context(dp))
	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, offset);
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
		/* initiate async i/o */
			rw_exit(&dn->dn_struct_rwlock);
			(void) dbuf_read(db, zio, flags);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
	rw_exit(&dn->dn_struct_rwlock);

	/* wait for async i/o */
	/* track read overhead when we are in sync context */
	if (dp && dsl_pool_sync_context(dp))
		dp->dp_read_overhead += gethrtime() - start;
		dmu_buf_rele_array(dbp, nblks, tag);

	/* wait for other io to complete */
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
		mutex_enter(&db->db_mtx);
		while (db->db_state == DB_READ ||
		    db->db_state == DB_FILL)
			cv_wait(&db->db_changed, &db->db_mtx);
		if (db->db_state == DB_UNCACHED)
		mutex_exit(&db->db_mtx);
		dmu_buf_rele_array(dbp, nblks, tag);
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
	err = dnode_hold(os->os, object, FTAG, &dn);
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	dnode_rele(dn, FTAG);
dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	for (i = 0; i < numbufs; i++) {
		dbuf_rele(dbp[i], tag);

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
	if (zfs_prefetch_disable)

	if (len == 0) { /* they're interested in the bonus buffer */
		dn = os->os->os_meta_dnode;

		if (object == 0 || object >= DN_MAX_OBJECT)

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, blkid);
		rw_exit(&dn->dn_struct_rwlock);

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os->os, object, FTAG, &dn);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+len, 1<<blkshift) -
		    P2ALIGN(offset, 1<<blkshift)) >> blkshift;
		nblks = (offset < dn->dn_datablksz);

	blkid = dbuf_whichblock(dn, offset);
	for (i = 0; i < nblks; i++)
		dbuf_prefetch(dn, blkid+i);

	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
get_next_chunk(dnode_t *dn, uint64_t *offset, uint64_t limit)
	uint64_t len = *offset - limit;
	uint64_t chunk_len = dn->dn_datablksz * DMU_MAX_DELETEBLKCNT;
	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT(limit <= *offset);

	if (len <= chunk_len) {

	ASSERT(ISP2(subchunk));

	while (*offset > limit) {
		uint64_t initial_offset = P2ROUNDUP(*offset, subchunk);

		/* skip over allocated data */
		err = dnode_next_offset(dn,
		    DNODE_FIND_HOLE|DNODE_FIND_BACKWARDS, offset, 1, 1, 0);

		ASSERT3U(*offset, <=, initial_offset);
		*offset = P2ALIGN(*offset, subchunk);
		delta = initial_offset - *offset;
		if (delta >= chunk_len) {
			*offset += delta - chunk_len;

		/* skip over unallocated data */
		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, offset, 1, 1, 0);

		ASSERT3U(*offset, <, initial_offset);
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length, boolean_t free_dnode)
	uint64_t object_size, start, end, len;
	boolean_t trunc = (length == DMU_OBJECT_END);

	align = 1 << dn->dn_datablkshift;

	object_size = align == 1 ? dn->dn_datablksz :
	    (dn->dn_maxblkid + 1) << dn->dn_datablkshift;

	if (trunc || (end = offset + length) > object_size)
	length = end - offset;

		err = get_next_chunk(dn, &start, offset);
		len = trunc ? DMU_OBJECT_END : end - start;

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, start, len);
		err = dmu_tx_assign(tx, TXG_WAIT);

		dnode_free_range(dn, start, trunc ? -1 : len, tx);

		if (start == 0 && free_dnode) {

		length -= end - start;
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
	err = dnode_hold(os->os, object, FTAG, &dn);
	err = dmu_free_long_range_impl(os, dn, offset, length, FALSE);
	dnode_rele(dn, FTAG);
dmu_free_object(objset_t *os, uint64_t object)
	err = dnode_hold_impl(os->os, object, DNODE_MUST_BE_ALLOCATED,
	if (dn->dn_nlevels == 1) {
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, object);
		dmu_tx_hold_free(tx, dn->dn_object, 0, DMU_OBJECT_END);
		err = dmu_tx_assign(tx, TXG_WAIT);
			dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
		err = dmu_free_long_range_impl(os, dn, 0, DMU_OBJECT_END, TRUE);
	dnode_rele(dn, FTAG);
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
	int err = dnode_hold(os->os, object, FTAG, &dn);

	ASSERT(offset < UINT64_MAX);
	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
	err = dnode_hold(os->os, object, FTAG, &dn);

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_datablkshift == 0) {
		int newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);

		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp);

		for (i = 0; i < numbufs; i++) {
			dmu_buf_t *db = dbp[i];

			bufoff = offset - db->db_offset;
			tocpy = (int)MIN(db->db_size - bufoff, size);

			bcopy((char *)db->db_data + bufoff, buf, tocpy);

			buf = (char *)buf + tocpy;

		dmu_buf_rele_array(dbp, numbufs, FTAG);

	dnode_rele(dn, FTAG);
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
			dmu_buf_will_dirty(db, tx);

		bcopy(buf, (char *)db->db_data + bufoff, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		buf = (char *)buf + tocpy;

	dmu_buf_rele_array(dbp, numbufs, FTAG);
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);

	dmu_buf_rele_array(dbp, numbufs, FTAG);
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		err = uiomove((char *)db->db_data + bufoff, tocpy,

	dmu_buf_rele_array(dbp, numbufs, FTAG);
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
	err = dmu_buf_hold_array(os, object, uio->uio_loffset, size,
	    FALSE, FTAG, &numbufs, &dbp);

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
			dmu_buf_will_dirty(db, tx);

		/*
		 * XXX uiomove could block forever (eg. nfs-backed
		 * pages).  There needs to be a uiolockdown() function
		 * to lock the pages in memory, so that uiomove won't
		 */
		err = uiomove((char *)db->db_data + bufoff, tocpy,

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

	dmu_buf_rele_array(dbp, numbufs, FTAG);
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    page_t *pp, dmu_tx_t *tx)
	err = dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp);

	for (i = 0; i < numbufs; i++) {
		int tocpy, copied, thiscpy;
		dmu_buf_t *db = dbp[i];

		ASSERT3U(db->db_size, >=, PAGESIZE);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
			dmu_buf_will_dirty(db, tx);

		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
			thiscpy = MIN(PAGESIZE, tocpy - copied);
			va = zfs_map_page(pp, S_READ);
			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
			zfs_unmap_page(pp, va);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

	dmu_buf_rele_array(dbp, numbufs, FTAG);
#endif	/* __NetBSD__ */
	dbuf_dirty_record_t *dr;
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
	blkptr_t *bp = zio->io_bp;

	if (!BP_IS_HOLE(bp)) {
		dmu_sync_arg_t *in = varg;
		dbuf_dirty_record_t *dr = in->dr;
		dmu_buf_impl_t *db = dr->dr_dbuf;
		ASSERT(BP_GET_TYPE(bp) == db->db_dnode->dn_type);
		ASSERT(BP_GET_LEVEL(bp) == 0);
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
	dmu_sync_arg_t *in = varg;
	dbuf_dirty_record_t *dr = in->dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dmu_sync_cb_t *done = in->done;

	mutex_enter(&db->db_mtx);
	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
	dr->dt.dl.dr_overridden_by = *zio->io_bp; /* structure assignment */
	dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

		done(&(db->db), in->arg);

	kmem_free(in, sizeof (dmu_sync_arg_t));
/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * EEXIST: this txg has already been synced, so there's nothing to do.
 *	The caller should not log the write.
 *
 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *	The caller should not log the write.
 *
 * EALREADY: this block is already in the process of being synced.
 *	The caller should track its progress (somehow).
 *
 * EINPROGRESS: the IO has been initiated.
 *	The caller should log this blkptr in the callback.
 *
 * 0: completed.  Sets *bp to the blkptr just written.
 *	The caller should log this blkptr immediately.
 */
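/*
 * Caller sketch (added for clarity; not part of the original source): the ZIL
 * is the primary consumer of dmu_sync(), and might act on the return values
 * documented above roughly as follows:
 *
 *	error = dmu_sync(pio, db, bp, txg, callback, arg);
 *	if (error == 0)
 *		// *bp is valid; log it immediately
 *	else if (error == EINPROGRESS)
 *		// log the blkptr delivered to the callback on completion
 *	else if (error == EEXIST || error == ENOENT)
 *		// nothing to log
 *	else if (error == EALREADY)
 *		// another dmu_sync() of this block is in flight; track it
 */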
dmu_sync(zio_t *pio, dmu_buf_t *db_fake,
    blkptr_t *bp, uint64_t txg, dmu_sync_cb_t *done, void *arg)
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	objset_impl_t *os = db->db_objset;
	dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool;
	tx_state_t *tx = &dp->dp_tx;
	dbuf_dirty_record_t *dr;
	writeprops_t wp = { 0 };

	ASSERT(BP_IS_HOLE(bp));

	dprintf("dmu_sync txg=%llu, s,o,q %llu %llu %llu\n",
	    txg, tx->tx_synced_txg, tx->tx_open_txg, tx->tx_quiesced_txg);

	/*
	 * XXX - would be nice if we could do this without suspending...
	 */

	/*
	 * If this txg already synced, there's nothing to do.
	 */
	if (txg <= tx->tx_synced_txg) {
		/*
		 * If we're running ziltest, we need the blkptr regardless.
		 */
		if (txg > spa_freeze_txg(dp->dp_spa)) {
			/* if db_blkptr == NULL, this was an empty write */
				*bp = *db->db_blkptr; /* structure assignment */

	mutex_enter(&db->db_mtx);

	if (txg == tx->tx_syncing_txg) {
		while (db->db_data_pending) {
			/*
			 * IO is in-progress.  Wait for it to finish.
			 * XXX - would be nice to be able to somehow "attach"
			 * this zio to the parent zio passed in.
			 */
			cv_wait(&db->db_changed, &db->db_mtx);
			if (!db->db_data_pending &&
			    db->db_blkptr && BP_IS_HOLE(db->db_blkptr)) {
				/*
				 * IO was compressed away
				 */
				*bp = *db->db_blkptr; /* structure assignment */
				mutex_exit(&db->db_mtx);

		ASSERT(db->db_data_pending ||
		    (db->db_blkptr && db->db_blkptr->blk_birth == txg));

		if (db->db_blkptr && db->db_blkptr->blk_birth == txg) {
			/*
			 * IO is already completed.
			 */
			*bp = *db->db_blkptr; /* structure assignment */
			mutex_exit(&db->db_mtx);

	dr = db->db_last_dirty;
	while (dr && dr->dr_txg > txg)
	if (dr == NULL || dr->dr_txg < txg) {
		/*
		 * This dbuf isn't dirty, must have been free_range'd.
		 * There's no need to log writes to freed blocks, so we're done.
		 */
		mutex_exit(&db->db_mtx);

	ASSERT(dr->dr_txg == txg);
	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		/*
		 * We have already issued a sync write for this buffer.
		 */
		mutex_exit(&db->db_mtx);
	} else if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * This buffer has already been synced.  It could not
		 * have been dirtied since, or we would have cleared the state.
		 */
		*bp = dr->dt.dl.dr_overridden_by; /* structure assignment */
		mutex_exit(&db->db_mtx);

	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
	in = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	mutex_exit(&db->db_mtx);

	zb.zb_objset = os->os_dsl_dataset->ds_object;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;

	wp.wp_type = db->db_dnode->dn_type;
	wp.wp_level = db->db_level;
	wp.wp_copies = os->os_copies;
	wp.wp_dnchecksum = db->db_dnode->dn_checksum;
	wp.wp_oschecksum = os->os_checksum;
	wp.wp_dncompress = db->db_dnode->dn_compress;
	wp.wp_oscompress = os->os_compress;

	ASSERT(BP_IS_HOLE(bp));

	zio = arc_write(pio, os->os_spa, &wp, DBUF_IS_L2CACHEABLE(db),
	    txg, bp, dr->dt.dl.dr_data, dmu_sync_ready, dmu_sync_done, in,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	err = zio_wait(zio);
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
	err = dnode_hold(os->os, object, FTAG, &dn);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os->os, object, FTAG, &dn);
	ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os->os, object, FTAG, &dn);
	ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
	err = dnode_hold(os->os, object, FTAG, &dn);

	/*
	 * Sync any current changes before
	 * we go trundling through the block pointers.
	 */
	for (i = 0; i < TXG_SIZE; i++) {
		if (list_link_active(&dn->dn_dirty_link[i]))

	if (i != TXG_SIZE) {
		dnode_rele(dn, FTAG);
		txg_wait_synced(dmu_objset_pool(os), 0);
		err = dnode_hold(os->os, object, FTAG, &dn);

	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	dnode_rele(dn, FTAG);
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_physical_blks = (DN_USED_BYTES(dn->dn_phys) +
	    SPA_MINBLOCKSIZE/2) >> SPA_MINBLOCKSHIFT;
	doi->doi_max_block_offset = dn->dn_phys->dn_maxblkid;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_bonus_type = dn->dn_bonustype;

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
	int err = dnode_hold(os->os, object, FTAG, &dn);

	dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi)
	dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi);
/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512)
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

	*blksize = dn->dn_datablksz;
	/* add 1 for dnode space */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + 1;
byteswap_uint64_array(void *vbuf, size_t size)
	uint64_t *buf = vbuf;
	size_t count = size >> 3;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
byteswap_uint32_array(void *vbuf, size_t size)
	uint32_t *buf = vbuf;
	size_t count = size >> 2;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
byteswap_uint16_array(void *vbuf, size_t size)
	uint16_t *buf = vbuf;
	size_t count = size >> 1;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
byteswap_uint8_array(void *vbuf, size_t size)