/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	TXG_VERIFY(dp->dp_spa, txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

boolean_t
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

boolean_t
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) zfs_refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * more info see the comment in dmu_tx_assign()).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	zfs_refcount_create(&txh->txh_space_towrite);
	zfs_refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * unlikely.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	/*
	 * PARTIAL_FIRST allows caching for uncacheable blocks.  It will
	 * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
	 */
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
	    (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	tx->tx_netfree = B_TRUE;
}

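/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * only frees space, e.g. when removing a file, would typically pair this
 * with a free hold:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *
 * so that the free can proceed even when the pool is close to full.
 */
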
static void
dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	dmu_tx_count_dnode(txh);

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note: dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}

			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}
		}
		err = zio_wait(zio);
		if (err != 0)
			tx->tx_err = err;
	}
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh != NULL)
		(void) dmu_tx_hold_free_impl(txh, off, len);
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	(void) dmu_tx_hold_free_impl(txh, off, len);
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;
	extern int zap_micro_max_size;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB)
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    zap_micro_max_size, FTAG);

	if (dn == NULL)
		return;

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err != 0) {
			tx->tx_err = err;
		}
	} else {
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not exist yet.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
			tx->tx_err = err;
		}
	}
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh != NULL)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	if (txh != NULL)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	if (txh != NULL) {
		(void) zfs_refcount_add_many(
		    &txh->txh_space_towrite, space, FTAG);
	}
}

void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	DB_DNODE_ENTER(db);
	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, (u_longlong_t)beginblk,
			    (u_longlong_t)endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				cmn_err(CE_PANIC, "bad txh_type %d",
				    txh->txh_type);
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC;	/* 100 milliseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * [ASCII chart not reproduced in this copy: delay (0 to 10ms, linear scale)
 *  versus dirty data from 0% to zfs_dirty_data_max, with zfs_delay_scale
 *  setting the delay at the curve's midpoint.]
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * [ASCII chart not reproduced in this copy: the same curve with delay on a
 *  log scale (up to 100ms), rising sharply only as the dirty data approaches
 *  zfs_dirty_data_max.]
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
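/*
 * Worked example (illustrative, not from the original source): assuming
 * zfs_delay_scale = 500,000, zfs_dirty_data_max = 4GB and
 * zfs_delay_min_dirty_percent = 60, delay_min_bytes is 2.4GB.  A transaction
 * arriving with 3GB of dirty data would therefore be delayed by at least
 *
 *	min_time = 500000 * (3GB - 2.4GB) / (4GB - 3GB) = 300,000ns = 300us
 *
 * before being capped at zfs_delay_max_ns (100ms).
 */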
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes, wrlog;
	hrtime_t wakeup, tx_time = 0, now;

	/* Calculate minimum transaction time for the dirty data amount. */
	delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	if (dirty > delay_min_bytes) {
		/*
		 * The caller has already waited until we are under the max.
		 * We make them pass us the amount of dirty data so we don't
		 * have to handle the case of it being >= the max, which
		 * could cause a divide-by-zero if it's == the max.
		 */
		ASSERT3U(dirty, <, zfs_dirty_data_max);

		tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
		    (zfs_dirty_data_max - dirty);
	}

	/* Calculate minimum transaction time for the TX_WRITE log size. */
	wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
	delay_min_bytes =
	    zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
	if (wrlog >= zfs_wrlog_data_max) {
		tx_time = zfs_delay_max_ns;
	} else if (wrlog > delay_min_bytes) {
		tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
		    (zfs_wrlog_data_max - wrlog), tx_time);
	}

	if (tx_time == 0)
		return;

	tx_time = MIN(tx_time, zfs_delay_max_ns);
	now = gethrtime();
	if (now > tx->tx_start + tx_time)
		return;

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	zfs_sleep_until(wakeup);
}

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 *  - this transaction's holds' txh_space_towrite
 *
 *  - dd_tempreserved[], which is the sum of in-flight transactions'
 *    holds' txh_space_towrite (i.e. those transactions that have called
 *    dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 *  - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
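/*
 * Illustrative sketch (not part of the original source): conceptually the
 * space check below amounts to
 *
 *	towrite = sum over this tx's holds of txh_space_towrite
 *	asize   = spa_get_worst_case_asize(spa, towrite)
 *	fail if (space already used + dd_tempreserved[] +
 *	    dd_space_towrite[] + asize) would exceed the quota or free space,
 *
 * with the actual accounting performed in dsl_dir_tempreserve_space().
 */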
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    !(txg_how & TXG_WAIT))
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_wrlog_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	uint64_t towrite = 0;
	uint64_t tohold = 0;
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			/*
			 * This thread can't hold the dn_struct_rwlock
			 * while assigning the tx, because this can lead to
			 * deadlock.  Specifically, if this dnode is already
			 * assigned to an earlier txg, this thread may need
			 * to wait for that txg to sync (the ERESTART case
			 * below).  The other thread that has assigned this
			 * dnode to an earlier txg prevents this txg from
			 * syncing until its tx can complete (calling
			 * dmu_tx_commit()), but it may need to acquire the
			 * dn_struct_rwlock to do so (e.g. via
			 * dnode_setdirty()).
			 *
			 * Note that this thread can't hold the lock for
			 * read either, but the rwlock doesn't record
			 * enough information to make that assertion.
			 */
			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += zfs_refcount_count(&txh->txh_space_towrite);
		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
	}

	/* needed allocation: worst-case estimate of write space */
	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
	/* calculate memory footprint estimate */
	uint64_t memory = towrite + tohold;

	if (tx->tx_dir != NULL && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
		if (err != 0)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
	    txh && txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs.  Of course this is
 * not strong monotonicity, because the same txg can be returned multiple
 * times in a row.  This guarantee holds both for subsequent calls from
 * one thread and for multiple threads.  For example, it is impossible to
 * observe the following sequence of events:
 *
 *          Thread 1                            Thread 2
 *
 *     dmu_tx_assign(T1, ...)
 *     1 <- dmu_tx_get_txg(T1)
 *                                         dmu_tx_assign(T2, ...)
 *                                         2 <- dmu_tx_get_txg(T2)
 *     dmu_tx_assign(T3, ...)
 *     1 <- dmu_tx_get_txg(T3)
 */
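/*
 * Typical caller pattern (an illustrative sketch, not part of the original
 * file; "os", "object", "off", "len" and "buf" are placeholder names):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 *
 * A caller holding locks would instead pass 0 (no TXG_WAIT); on ERESTART it
 * would drop its locks, call dmu_tx_wait(tx), then dmu_tx_abort(tx), and
 * retry from the top.
 */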
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

	if ((txg_how & TXG_NOTHROTTLE))
		tx->tx_dirty_delayed = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || !(txg_how & TXG_WAIT))
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;
	hrtime_t before;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	before = gethrtime();

	if (tx->tx_wait_dirty) {
		uint64_t dirty;

		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		if (dp->dp_dirty_total >= zfs_dirty_data_max)
			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_dirty_delayed only has effect if the
		 * caller used TXG_WAIT.  Otherwise they are going to
		 * destroy this tx and try again.  The common case,
		 * zfs_write(), uses TXG_WAIT.
		 */
		tx->tx_dirty_delayed = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		/*
		 * If we have a lot of dirty data just wait until we sync
		 * out a TXG at which point we'll hopefully have synced
		 * a portion of the changes.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	}

	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		zfs_refcount_destroy_many(&txh->txh_space_towrite,
		    zfs_refcount_count(&txh->txh_space_towrite));
		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
		    zfs_refcount_count(&txh->txh_memory_tohold));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));

	dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_tail(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

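/*
 * Illustrative example (not part of the original file): a caller that wants
 * to know when its changes reach stable storage can register a commit
 * callback before committing; "my_cb" and "my_arg" are hypothetical names:
 *
 *	static void
 *	my_cb(void *arg, int error)
 *	{
 *		// error is 0 on a successful sync, ECANCELED on abort
 *	}
 *
 *	dmu_tx_callback_register(tx, my_cb, my_arg);
 *	dmu_tx_commit(tx);
 *
 * The callback runs after the assigned txg syncs, or immediately with an
 * error if the transaction is aborted.
 */
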
/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	if (!sa->sa_need_attr_registration)
		return;

	for (int i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);
	if (txh != NULL)
		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
		    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size)
 *
 * variable_size is the total size of all variable sized attributes
 * passed to this function.  It is not the total size of all
 * variable size attributes that *may* exist on this object.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
	DB_DNODE_ENTER(db);
	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
	DB_DNODE_EXIT(db);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		DB_DNODE_ENTER(db);
		dnode_t *dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}

void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif