/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_dmu.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	TXG_VERIFY(dp->dp_spa, txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) zfs_refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * more info see the comment in dmu_tx_assign()).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	zfs_refcount_create(&txh->txh_space_towrite);
	zfs_refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e, it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has less locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (zfs_refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, 0, 0);
	if (txh == NULL)
		return;

	dnode_t *dn = txh->txh_dnode;
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    1ULL << dn->dn_indblkshift, FTAG);
	dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	tx->tx_netfree = B_TRUE;
}
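
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * of dmu_tx_mark_netfree() for an operation that only frees space, e.g. a
 * hypothetical unlink-style path.  The os, object, dir_object and name
 * variables are placeholders, not names from this file.
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_hold_zap(tx, dir_object, FALSE, name);
 *	dmu_tx_mark_netfree(tx);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *
 * Because the transaction is marked as a net free, refquota enforcement is
 * relaxed and the assignment may use part of the pool space overhead (see
 * dsl_pool_adjustedsize()), as described in the comment above.
 */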

static void
dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	dmu_tx_count_dnode(txh);

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}

			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}
		}
		err = zio_wait(zio);
		if (err != 0) {
			tx->tx_err = err;
			return;
		}
	}
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh != NULL)
		(void) dmu_tx_hold_free_impl(txh, off, len);
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	if (txh != NULL)
		(void) dmu_tx_hold_free_impl(txh, off, len);
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB)
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    MZAP_MAX_BLKSZ, FTAG);

	if (dn == NULL)
		return;

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err != 0) {
			tx->tx_err = err;
		}
	} else {
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not yet exist.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
			tx->tx_err = err;
		}
	}
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(dn != NULL);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	if (txh) {
		(void) zfs_refcount_add_many(
		    &txh->txh_space_towrite, space, FTAG);
	}
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	DB_DNODE_ENTER(db);
	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				cmn_err(CE_PANIC, "bad txh_type %d",
				    txh->txh_type);
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = 100 * MICROSEC;	/* 100 milliseconds */
int zfs_delay_resolution_ns = 100 * 1000;	/* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * [ASCII graph elided: delay (0 to 10ms, linear scale) versus dirty data as
 * a percentage of zfs_dirty_data_max; the delay is near zero through the
 * midpoint, where it equals zfs_delay_scale, and then rises sharply as the
 * dirty data approaches 100%.]
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * [ASCII graph elided: the same curve with delay plotted on a log scale up
 * to 100ms, again versus dirty data from 0% to 100% of zfs_dirty_data_max,
 * with zfs_delay_scale marking the midpoint.]
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
	if (now > tx->tx_start + min_tx_time)
		return;

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	zfs_sleep_until(wakeup);
}
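
/*
 * Illustrative numbers (not part of the original source), assuming the
 * commonly used tunable values zfs_dirty_data_max = 4 GiB,
 * zfs_delay_min_dirty_percent = 60 and zfs_delay_scale = 500,000 ns, so
 * delay_min_bytes = 2.4 GiB:
 *
 *	dirty = 2.4 GiB:   min_tx_time = 500000 * 0        / 1.6 GiB  = 0
 *	dirty = 3.2 GiB:   min_tx_time = 500000 * 0.8 GiB  / 0.8 GiB  = 500 us
 *	dirty = 3.8 GiB:   min_tx_time = 500000 * 1.4 GiB  / 0.2 GiB  = 3.5 ms
 *	dirty = 3.96 GiB:  min_tx_time = 500000 * 1.56 GiB / 40 MiB   = 19.5 ms
 *
 * with the result always capped at zfs_delay_max_ns (100 ms).  The delay is
 * negligible through most of the range and climbs steeply only as the dirty
 * data approaches zfs_dirty_data_max, matching the curves described above.
 */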

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 *  - this transaction's holds' txh_space_towrite
 *
 *  - dd_tempreserved[], which is the sum of in-flight transactions'
 *    holds' txh_space_towrite (i.e. those transactions that have called
 *    dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 *  - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    !(txg_how & TXG_WAIT))
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	uint64_t towrite = 0;
	uint64_t tohold = 0;
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			/*
			 * This thread can't hold the dn_struct_rwlock
			 * while assigning the tx, because this can lead to
			 * deadlock. Specifically, if this dnode is already
			 * assigned to an earlier txg, this thread may need
			 * to wait for that txg to sync (the ERESTART case
			 * below).  The other thread that has assigned this
			 * dnode to an earlier txg prevents this txg from
			 * syncing until its tx can complete (calling
			 * dmu_tx_commit()), but it may need to acquire the
			 * dn_struct_rwlock to do so (e.g. via
			 * dmu_buf_hold*()).
			 *
			 * Note that this thread can't hold the lock for
			 * read either, but the rwlock doesn't record
			 * enough information to make that assertion.
			 */
			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += zfs_refcount_count(&txh->txh_space_towrite);
		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
	}

	/* needed allocation: worst-case estimate of write space */
	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
	/* calculate memory footprint estimate */
	uint64_t memory = towrite + tohold;

	if (tx->tx_dir != NULL && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
		if (err != 0)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
	    txh && txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

	if ((txg_how & TXG_NOTHROTTLE))
		tx->tx_dirty_delayed = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || !(txg_how & TXG_WAIT))
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
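
/*
 * Illustrative sketch (not part of the original source): the retry pattern
 * the comment above describes, for a caller that holds locks and therefore
 * cannot pass TXG_WAIT.  The os, object, off and len variables are
 * placeholders, and TXG_NOWAIT here simply means "no TXG_WAIT bit set".
 *
 *	dmu_tx_t *tx;
 *	int err;
 *
 *	for (;;) {
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_write(tx, object, off, len);
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err == 0)
 *			break;				(tx is assigned)
 *		if (err != ERESTART) {
 *			dmu_tx_abort(tx);		(hard error, e.g. ENOSPC)
 *			return (err);
 *		}
 *		(drop locks here)
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		(reacquire locks and retry with a fresh tx)
 *	}
 *	... modify data under the assigned tx ...
 *	dmu_tx_commit(tx);
 *
 * As noted above, callers that have already gone through dmu_tx_wait()
 * typically pass TXG_NOTHROTTLE on the retry so the write throttle is not
 * applied twice to the same operation.
 */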

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;
	hrtime_t before;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	before = gethrtime();

	if (tx->tx_wait_dirty) {
		uint64_t dirty;

		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		if (dp->dp_dirty_total >= zfs_dirty_data_max)
			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_dirty_delayed only has effect if the
		 * caller used TXG_WAIT.  Otherwise they are going to
		 * destroy this tx and try again.  The common case,
		 * zfs_write(), uses TXG_WAIT.
		 */
		tx->tx_dirty_delayed = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		/*
		 * If we have a lot of dirty data just wait until we sync
		 * out a TXG at which point we'll hopefully have synced
		 * a portion of the changes.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	}

	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		zfs_refcount_destroy_many(&txh->txh_space_towrite,
		    zfs_refcount_count(&txh->txh_space_towrite));
		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
		    zfs_refcount_count(&txh->txh_memory_tohold));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_tail(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
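
/*
 * Illustrative sketch (not part of the original source): registering a
 * commit callback.  The callback runs via dmu_tx_do_callbacks() with
 * error == 0 once the txg the tx was assigned to has committed, or with
 * ECANCELED if the tx is aborted (see dmu_tx_abort() above).  The callback
 * function and its argument type are hypothetical.
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		my_state_t *ms = arg;
 *
 *		ms->on_disk = (error == 0);
 *	}
 *
 *	(after dmu_tx_assign() succeeds and before dmu_tx_commit())
 *	dmu_tx_callback_register(tx, my_commit_cb, ms);
 */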

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	if (!sa->sa_need_attr_registration)
		return;

	for (int i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);
	if (txh != NULL)
		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
		    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size)
 *
 * variable_size is the total size of all variable sized attributes
 * passed to this function.  It is not the total size of all
 * variable size attributes that *may* exist on this object.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
	DB_DNODE_ENTER(db);
	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
	DB_DNODE_EXIT(db);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		DB_DNODE_ENTER(db);
		dnode_t *dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}

void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif