/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
    { "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
    { "dmu_tx_delay",			KSTAT_DATA_UINT64 },
    { "dmu_tx_error",			KSTAT_DATA_UINT64 },
    { "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
    { "dmu_tx_group",			KSTAT_DATA_UINT64 },
    { "dmu_tx_memory_reserve",		KSTAT_DATA_UINT64 },
    { "dmu_tx_memory_reclaim",		KSTAT_DATA_UINT64 },
    { "dmu_tx_dirty_throttle",		KSTAT_DATA_UINT64 },
    { "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
    { "dmu_tx_dirty_over_max",		KSTAT_DATA_UINT64 },
    { "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
    { "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
    { "dmu_tx_quota",			KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

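/*
 * The counters above are exported through the kstat framework registered
 * in dmu_tx_init() below; on Linux they surface as
 * /proc/spl/kstat/zfs/dmu_tx.
 */
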
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
    dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
    tx->tx_dir = dd;
    if (dd != NULL)
        tx->tx_pool = dd->dd_pool;
    list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
        offsetof(dmu_tx_hold_t, txh_node));
    list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
        offsetof(dmu_tx_callback_t, dcb_node));
    tx->tx_start = gethrtime();
    return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
    dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
    tx->tx_objset = os;
    return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
    dmu_tx_t *tx = dmu_tx_create_dd(NULL);

    TXG_VERIFY(dp->dp_spa, txg);
    tx->tx_pool = dp;
    tx->tx_txg = txg;
    tx->tx_anyobj = TRUE;

    return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
    return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
    return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
    dmu_tx_hold_t *txh;

    if (dn != NULL) {
        (void) zfs_refcount_add(&dn->dn_holds, tx);
        if (tx->tx_txg != 0) {
            mutex_enter(&dn->dn_mtx);
            /*
             * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
             * problem, but there's no way for it to happen (for
             * now, at least).
             */
            ASSERT(dn->dn_assigned_txg == 0);
            dn->dn_assigned_txg = tx->tx_txg;
            (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
            mutex_exit(&dn->dn_mtx);
        }
    }

    txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
    txh->txh_tx = tx;
    txh->txh_dnode = dn;
    zfs_refcount_create(&txh->txh_space_towrite);
    zfs_refcount_create(&txh->txh_memory_tohold);
    txh->txh_type = type;
    txh->txh_arg1 = arg1;
    txh->txh_arg2 = arg2;
    list_insert_tail(&tx->tx_holds, txh);

    return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
    dnode_t *dn = NULL;
    dmu_tx_hold_t *txh;
    int err;

    if (object != DMU_NEW_OBJECT) {
        err = dnode_hold(os, object, FTAG, &dn);
        if (err != 0) {
            tx->tx_err = err;
            return (NULL);
        }
    }
    txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
    if (dn != NULL)
        dnode_rele(dn, FTAG);
    return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
    /*
     * If we're syncing, they can manipulate any object anyhow, and
     * the hold on the dnode_t can cause problems.
     */
    if (!dmu_tx_is_syncing(tx))
        (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
    int err;
    dmu_buf_impl_t *db;

    rw_enter(&dn->dn_struct_rwlock, RW_READER);
    db = dbuf_hold_level(dn, level, blkid, FTAG);
    rw_exit(&dn->dn_struct_rwlock);
    if (db == NULL)
        return (SET_ERROR(EIO));
    err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
    dbuf_rele(db, FTAG);
    return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
    dnode_t *dn = txh->txh_dnode;
    int err = 0;

    if (len == 0)
        return;

    (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

    if (dn == NULL)
        return;

    /*
     * For i/o error checking, read the blocks that will be needed
     * to perform the write: the first and last level-0 blocks (if
     * they are not aligned, i.e. if they are partial-block writes),
     * and all the level-1 blocks.
     */
    if (dn->dn_maxblkid == 0) {
        if (off < dn->dn_datablksz &&
            (off > 0 || len < dn->dn_datablksz)) {
            err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
            if (err != 0) {
                txh->txh_tx->tx_err = err;
            }
        }
    } else {
        zio_t *zio = zio_root(dn->dn_objset->os_spa,
            NULL, NULL, ZIO_FLAG_CANFAIL);

        /* first level-0 block */
        uint64_t start = off >> dn->dn_datablkshift;
        if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
            err = dmu_tx_check_ioerr(zio, dn, 0, start);
            if (err != 0) {
                txh->txh_tx->tx_err = err;
            }
        }

        /* last level-0 block */
        uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
        if (end != start && end <= dn->dn_maxblkid &&
            P2PHASE(off + len, dn->dn_datablksz)) {
            err = dmu_tx_check_ioerr(zio, dn, 0, end);
            if (err != 0) {
                txh->txh_tx->tx_err = err;
            }
        }

        /* level-1 blocks */
        if (dn->dn_nlevels > 1) {
            int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
            for (uint64_t i = (start >> shft) + 1;
                i < end >> shft; i++) {
                err = dmu_tx_check_ioerr(zio, dn, 1, i);
                if (err != 0) {
                    txh->txh_tx->tx_err = err;
                }
            }
        }

        err = zio_wait(zio);
        if (err != 0) {
            txh->txh_tx->tx_err = err;
        }
    }
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
    (void) zfs_refcount_add_many(&txh->txh_space_towrite,
        DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
    dmu_tx_hold_t *txh;

    ASSERT0(tx->tx_txg);
    ASSERT3U(len, <=, DMU_MAX_ACCESS);
    ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

    txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
        object, THT_WRITE, off, len);
    if (txh != NULL) {
        dmu_tx_count_write(txh, off, len);
        dmu_tx_count_dnode(txh);
    }
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
    dmu_tx_hold_t *txh;

    ASSERT0(tx->tx_txg);
    ASSERT3U(len, <=, DMU_MAX_ACCESS);
    ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

    txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
    if (txh != NULL) {
        dmu_tx_count_write(txh, off, len);
        dmu_tx_count_dnode(txh);
    }
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
    tx->tx_netfree = B_TRUE;
}

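/*
 * Illustrative only: a caller that expects to free more space than it
 * allocates (e.g. removing a file or punching a hole) might pair a free
 * hold with a netfree mark, roughly as in this sketch ("os" and
 * "object" are hypothetical locals):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	if (dmu_tx_assign(tx, TXG_WAIT) != 0)
 *		dmu_tx_abort(tx);
 *	else
 *		dmu_tx_commit(tx);
 */
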
static void
dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
    dmu_tx_t *tx = txh->txh_tx;
    dnode_t *dn = txh->txh_dnode;
    int err;

    ASSERT(tx->tx_txg == 0);

    dmu_tx_count_dnode(txh);

    if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
        return;
    if (len == DMU_OBJECT_END)
        len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

    /*
     * For i/o error checking, we read the first and last level-0
     * blocks if they are not aligned, and all the level-1 blocks.
     *
     * Note:  dbuf_free_range() assumes that we have not instantiated
     * any level-0 dbufs that will be completely freed.  Therefore we must
     * exercise care to not read or count the first and last blocks
     * if they are blocksize-aligned.
     */
    if (dn->dn_datablkshift == 0) {
        if (off != 0 || len < dn->dn_datablksz)
            dmu_tx_count_write(txh, 0, dn->dn_datablksz);
    } else {
        /* first block will be modified if it is not aligned */
        if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
            dmu_tx_count_write(txh, off, 1);
        /* last block will be modified if it is not aligned */
        if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
            dmu_tx_count_write(txh, off + len, 1);
    }

    /*
     * Check level-1 blocks.
     */
    if (dn->dn_nlevels > 1) {
        int shift = dn->dn_datablkshift + dn->dn_indblkshift -
            SPA_BLKPTRSHIFT;
        uint64_t start = off >> shift;
        uint64_t end = (off + len) >> shift;

        ASSERT(dn->dn_indblkshift != 0);

        /*
         * dnode_reallocate() can result in an object with indirect
         * blocks having an odd data block size.  In this case,
         * just check the single block.
         */
        if (dn->dn_datablkshift == 0)
            start = end = 0;

        zio_t *zio = zio_root(tx->tx_pool->dp_spa,
            NULL, NULL, ZIO_FLAG_CANFAIL);
        for (uint64_t i = start; i <= end; i++) {
            uint64_t ibyte = i << shift;
            err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
            i = ibyte >> shift;
            if (err == ESRCH || i > end)
                break;
            if (err != 0) {
                tx->tx_err = err;
                (void) zio_wait(zio);
                return;
            }

            (void) zfs_refcount_add_many(&txh->txh_memory_tohold,
                1 << dn->dn_indblkshift, FTAG);

            err = dmu_tx_check_ioerr(zio, dn, 1, i);
            if (err != 0) {
                tx->tx_err = err;
                (void) zio_wait(zio);
                return;
            }
        }
        err = zio_wait(zio);
        if (err != 0) {
            tx->tx_err = err;
            return;
        }
    }
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
    dmu_tx_hold_t *txh;

    txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
        object, THT_FREE, off, len);
    if (txh != NULL)
        (void) dmu_tx_hold_free_impl(txh, off, len);
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
    dmu_tx_hold_t *txh;

    txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
    if (txh != NULL)
        (void) dmu_tx_hold_free_impl(txh, off, len);
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
    dmu_tx_t *tx = txh->txh_tx;
    dnode_t *dn = txh->txh_dnode;
    int err;

    ASSERT(tx->tx_txg == 0);

    dmu_tx_count_dnode(txh);

    /*
     * Modifying an almost-full microzap is around the worst case (128KB)
     *
     * If it is a fat zap, the worst case would be 7*16KB=112KB:
     * - 3 blocks overwritten: target leaf, ptrtbl block, header block
     * - 4 new blocks written if adding:
     *    - 2 blocks for possibly split leaves,
     *    - 2 grown ptrtbl blocks
     */
    (void) zfs_refcount_add_many(&txh->txh_space_towrite,
        MZAP_MAX_BLKSZ, FTAG);

    if (dn == NULL)
        return;

    ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

    if (dn->dn_maxblkid == 0 || name == NULL) {
        /*
         * This is a microzap (only one block), or we don't know
         * the name.  Check the first block for i/o errors.
         */
        err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
        if (err != 0) {
            tx->tx_err = err;
        }
    } else {
        /*
         * Access the name so that we'll check for i/o errors to
         * the leaf blocks, etc.  We ignore ENOENT, as this name
         * may not yet exist.
         */
        err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
        if (err == EIO || err == ECKSUM || err == ENXIO) {
            tx->tx_err = err;
        }
    }
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
    dmu_tx_hold_t *txh;

    ASSERT0(tx->tx_txg);

    txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
        object, THT_ZAP, add, (uintptr_t)name);
    if (txh != NULL)
        dmu_tx_hold_zap_impl(txh, name);
}

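/*
 * Illustrative only: adding a directory entry typically combines a ZAP
 * hold on the directory object with a hold on the object being created,
 * roughly as in this sketch ("dzp" and "name" are hypothetical):
 *
 *	dmu_tx_hold_zap(tx, dzp->z_id, B_TRUE, name);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 */
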
void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
    dmu_tx_hold_t *txh;

    ASSERT0(tx->tx_txg);
    ASSERT(dn != NULL);

    txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
    if (txh != NULL)
        dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
    dmu_tx_hold_t *txh;

    ASSERT(tx->tx_txg == 0);

    txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
        object, THT_BONUS, 0, 0);
    if (txh)
        dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
    dmu_tx_hold_t *txh;

    ASSERT0(tx->tx_txg);

    txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
    if (txh)
        dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
    dmu_tx_hold_t *txh;

    ASSERT(tx->tx_txg == 0);

    txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
        DMU_NEW_OBJECT, THT_SPACE, space, 0);
    if (txh) {
        (void) zfs_refcount_add_many(
            &txh->txh_space_towrite, space, FTAG);
    }
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
    boolean_t match_object = B_FALSE;
    boolean_t match_offset = B_FALSE;

    DB_DNODE_ENTER(db);
    dnode_t *dn = DB_DNODE(db);
    ASSERT(tx->tx_txg != 0);
    ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
    ASSERT3U(dn->dn_object, ==, db->db.db_object);

    if (tx->tx_anyobj) {
        DB_DNODE_EXIT(db);
        return;
    }

    /* XXX No checking on the meta dnode for now */
    if (db->db.db_object == DMU_META_DNODE_OBJECT) {
        DB_DNODE_EXIT(db);
        return;
    }

    for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
        txh = list_next(&tx->tx_holds, txh)) {
        ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
        if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
            match_object = TRUE;
        if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
            int datablkshift = dn->dn_datablkshift ?
                dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
            int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
            int shift = datablkshift + epbs * db->db_level;
            uint64_t beginblk = shift >= 64 ? 0 :
                (txh->txh_arg1 >> shift);
            uint64_t endblk = shift >= 64 ? 0 :
                ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
            uint64_t blkid = db->db_blkid;

            /* XXX txh_arg2 better not be zero... */

            dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
                txh->txh_type, (u_longlong_t)beginblk,
                (u_longlong_t)endblk);

            switch (txh->txh_type) {
            case THT_WRITE:
                if (blkid >= beginblk && blkid <= endblk)
                    match_offset = TRUE;
                /*
                 * We will let this hold work for the bonus
                 * or spill buffer so that we don't need to
                 * hold it when creating a new object.
                 */
                if (blkid == DMU_BONUS_BLKID ||
                    blkid == DMU_SPILL_BLKID)
                    match_offset = TRUE;
                /*
                 * They might have to increase nlevels,
                 * thus dirtying the new TLIBs.  Or they
                 * might have to change the block size,
                 * thus dirtying the new lvl=0 blk=0.
                 */
                if (blkid == 0)
                    match_offset = TRUE;
                break;
            case THT_FREE:
                /*
                 * We will dirty all the level 1 blocks in
                 * the free range and perhaps the first and
                 * last level 0 block.
                 */
                if (blkid >= beginblk && (blkid <= endblk ||
                    txh->txh_arg2 == DMU_OBJECT_END))
                    match_offset = TRUE;
                break;
            case THT_SPILL:
                if (blkid == DMU_SPILL_BLKID)
                    match_offset = TRUE;
                break;
            case THT_BONUS:
                if (blkid == DMU_BONUS_BLKID)
                    match_offset = TRUE;
                break;
            case THT_ZAP:
                match_offset = TRUE;
                break;
            case THT_NEWOBJECT:
                match_object = TRUE;
                break;
            default:
                cmn_err(CE_PANIC, "bad txh_type %d",
                    txh->txh_type);
            }
        }
        if (match_object && match_offset) {
            DB_DNODE_EXIT(db);
            return;
        }
    }
    DB_DNODE_EXIT(db);
    panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
        (u_longlong_t)db->db.db_object, db->db_level,
        (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->     *****            +
 *       |                                         ****                 |
 *       +                                      ****                    +
 * 100us +                                    **                        +
 *       +                                  *                           +
 *       |                                 *                            |
 *       +                                *                             +
 *  10us +                               *                              +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
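/*
 * A worked example of the min_time formula above (a sketch; it assumes
 * the default tunables zfs_delay_min_dirty_percent = 60 and
 * zfs_delay_scale = 500,000, with zfs_dirty_data_max = 4GB): at 3GB of
 * dirty data, delay_min_bytes = 60% of 4GB = 2.4GB, so
 *
 *	min_time = 500000 * (3GB - 2.4GB) / (4GB - 3GB) ~= 300000ns
 *
 * i.e. roughly a 300us floor per transaction, rising sharply as dirty
 * data approaches zfs_dirty_data_max and capped at zfs_delay_max_ns.
 */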
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
    dsl_pool_t *dp = tx->tx_pool;
    uint64_t delay_min_bytes, wrlog;
    hrtime_t wakeup, tx_time = 0, now;

    /* Calculate minimum transaction time for the dirty data amount. */
    delay_min_bytes =
        zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
    if (dirty > delay_min_bytes) {
        /*
         * The caller has already waited until we are under the max.
         * We make them pass us the amount of dirty data so we don't
         * have to handle the case of it being >= the max, which
         * could cause a divide-by-zero if it's == the max.
         */
        ASSERT3U(dirty, <, zfs_dirty_data_max);

        tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
            (zfs_dirty_data_max - dirty);
    }

    /* Calculate minimum transaction time for the TX_WRITE log size. */
    wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
    delay_min_bytes =
        zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
    if (wrlog >= zfs_wrlog_data_max) {
        tx_time = zfs_delay_max_ns;
    } else if (wrlog > delay_min_bytes) {
        tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
            (zfs_wrlog_data_max - wrlog), tx_time);
    }

    if (tx_time == 0)
        return;

    tx_time = MIN(tx_time, zfs_delay_max_ns);
    now = gethrtime();
    if (now > tx->tx_start + tx_time)
        return;

    DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
        uint64_t, tx_time);

    mutex_enter(&dp->dp_lock);
    wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
    dp->dp_last_wakeup = wakeup;
    mutex_exit(&dp->dp_lock);

    zfs_sleep_until(wakeup);
}

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 * - this transaction's holds' txh_space_towrite
 *
 * - dd_tempreserved[], which is the sum of in-flight transactions'
 *   holds' txh_space_towrite (i.e. those transactions that have called
 *   dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 * - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
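/*
 * Illustrative only: with the default spa_asize_inflation of 24 (at the
 * time of writing), a 128KB txh_space_towrite estimate is charged
 * against the pool as 24 x 128KB = 3MB of worst-case allocation until
 * the transaction commits or aborts.
 */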
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
    spa_t *spa = tx->tx_pool->dp_spa;

    ASSERT0(tx->tx_txg);

    if (tx->tx_err) {
        DMU_TX_STAT_BUMP(dmu_tx_error);
        return (tx->tx_err);
    }

    if (spa_suspended(spa)) {
        DMU_TX_STAT_BUMP(dmu_tx_suspended);

        /*
         * If the user has indicated a blocking failure mode
         * then return ERESTART which will block in dmu_tx_wait().
         * Otherwise, return EIO so that an error can get
         * propagated back to the VOP calls.
         *
         * Note that we always honor the txg_how flag regardless
         * of the failuremode setting.
         */
        if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
            !(txg_how & TXG_WAIT))
            return (SET_ERROR(EIO));

        return (SET_ERROR(ERESTART));
    }

    if (!tx->tx_dirty_delayed &&
        dsl_pool_need_wrlog_delay(tx->tx_pool)) {
        tx->tx_wait_dirty = B_TRUE;
        DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
        return (SET_ERROR(ERESTART));
    }

    if (!tx->tx_dirty_delayed &&
        dsl_pool_need_dirty_delay(tx->tx_pool)) {
        tx->tx_wait_dirty = B_TRUE;
        DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
        return (SET_ERROR(ERESTART));
    }

    tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
    tx->tx_needassign_txh = NULL;

    /*
     * NB: No error returns are allowed after txg_hold_open, but
     * before processing the dnode holds, due to the
     * dmu_tx_unassign() logic.
     */

    uint64_t towrite = 0;
    uint64_t tohold = 0;
    for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
        txh = list_next(&tx->tx_holds, txh)) {
        dnode_t *dn = txh->txh_dnode;
        if (dn != NULL) {
            /*
             * This thread can't hold the dn_struct_rwlock
             * while assigning the tx, because this can lead to
             * deadlock.  Specifically, if this dnode is already
             * assigned to an earlier txg, this thread may need
             * to wait for that txg to sync (the ERESTART case
             * below).  The other thread that has assigned this
             * dnode to an earlier txg prevents this txg from
             * syncing until its tx can complete (calling
             * dmu_tx_commit()), but it may need to acquire the
             * dn_struct_rwlock to do so (e.g. via
             * dmu_buf_hold*()).
             *
             * Note that this thread can't hold the lock for
             * read either, but the rwlock doesn't record
             * enough information to make that assertion.
             */
            ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

            mutex_enter(&dn->dn_mtx);
            if (dn->dn_assigned_txg == tx->tx_txg - 1) {
                mutex_exit(&dn->dn_mtx);
                tx->tx_needassign_txh = txh;
                DMU_TX_STAT_BUMP(dmu_tx_group);
                return (SET_ERROR(ERESTART));
            }
            if (dn->dn_assigned_txg == 0)
                dn->dn_assigned_txg = tx->tx_txg;
            ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
            (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
            mutex_exit(&dn->dn_mtx);
        }
        towrite += zfs_refcount_count(&txh->txh_space_towrite);
        tohold += zfs_refcount_count(&txh->txh_memory_tohold);
    }

    /* needed allocation: worst-case estimate of write space */
    uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
    /* calculate memory footprint estimate */
    uint64_t memory = towrite + tohold;

    if (tx->tx_dir != NULL && asize != 0) {
        int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
            asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
        if (err != 0)
            return (err);
    }

    DMU_TX_STAT_BUMP(dmu_tx_assigned);

    return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
    if (tx->tx_txg == 0)
        return;

    txg_rele_to_quiesce(&tx->tx_txgh);

    /*
     * Walk the transaction's hold list, removing the hold on the
     * associated dnode, and notifying waiters if the refcount drops to 0.
     */
    for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
        txh && txh != tx->tx_needassign_txh;
        txh = list_next(&tx->tx_holds, txh)) {
        dnode_t *dn = txh->txh_dnode;

        if (dn == NULL)
            continue;
        mutex_enter(&dn->dn_mtx);
        ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

        if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
            dn->dn_assigned_txg = 0;
            cv_broadcast(&dn->dn_notxholds);
        }
        mutex_exit(&dn->dn_mtx);
    }

    txg_rele_to_sync(&tx->tx_txgh);

    tx->tx_lasttried_txg = tx->tx_txg;
    tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs.  Of course this is
 * not strong monotonicity, because the same txg can be returned multiple
 * times in a row.  This guarantee holds both for subsequent calls from
 * one thread and for multiple threads.  For example, it is impossible to
 * observe the following sequence of events:
 *
 *	Thread 1                            Thread 2
 *
 *	dmu_tx_assign(T1, ...)
 *	1 <- dmu_tx_get_txg(T1)
 *					dmu_tx_assign(T2, ...)
 *					2 <- dmu_tx_get_txg(T2)
 *	dmu_tx_assign(T3, ...)
 *	1 <- dmu_tx_get_txg(T3)
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
    int err;

    ASSERT(tx->tx_txg == 0);
    ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
    ASSERT(!dsl_pool_sync_context(tx->tx_pool));

    /* If we might wait, we must not hold the config lock. */
    IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

    if ((txg_how & TXG_NOTHROTTLE))
        tx->tx_dirty_delayed = B_TRUE;

    while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
        dmu_tx_unassign(tx);

        if (err != ERESTART || !(txg_how & TXG_WAIT))
            return (err);

        dmu_tx_wait(tx);
    }

    txg_rele_to_quiesce(&tx->tx_txgh);

    return (0);
}

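/*
 * A minimal sketch of the consumer pattern for the functions above
 * (error handling elided to the essentials; "os", "object", "off",
 * "len", and "buf" are hypothetical locals):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */
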
void
dmu_tx_wait(dmu_tx_t *tx)
{
    spa_t *spa = tx->tx_pool->dp_spa;
    dsl_pool_t *dp = tx->tx_pool;
    hrtime_t before;

    ASSERT(tx->tx_txg == 0);
    ASSERT(!dsl_pool_config_held(tx->tx_pool));

    before = gethrtime();

    if (tx->tx_wait_dirty) {
        uint64_t dirty;

        /*
         * dmu_tx_try_assign() has determined that we need to wait
         * because we've consumed much or all of the dirty buffer
         * space.
         */
        mutex_enter(&dp->dp_lock);
        if (dp->dp_dirty_total >= zfs_dirty_data_max)
            DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
        while (dp->dp_dirty_total >= zfs_dirty_data_max)
            cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
        dirty = dp->dp_dirty_total;
        mutex_exit(&dp->dp_lock);

        dmu_tx_delay(tx, dirty);

        tx->tx_wait_dirty = B_FALSE;

        /*
         * Note: setting tx_dirty_delayed only has effect if the
         * caller used TXG_WAIT.  Otherwise they are going to
         * destroy this tx and try again.  The common case,
         * zfs_write(), uses TXG_WAIT.
         */
        tx->tx_dirty_delayed = B_TRUE;
    } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
        /*
         * If the pool is suspended we need to wait until it
         * is resumed.  Note that it's possible that the pool
         * has become active after this thread has tried to
         * obtain a tx.  If that's the case then tx_lasttried_txg
         * would not have been set.
         */
        txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
    } else if (tx->tx_needassign_txh) {
        dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

        mutex_enter(&dn->dn_mtx);
        while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
            cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
        mutex_exit(&dn->dn_mtx);
        tx->tx_needassign_txh = NULL;
    } else {
        /*
         * If we have a lot of dirty data just wait until we sync
         * out a TXG at which point we'll hopefully have synced
         * a portion of the changes.
         */
        txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
    }

    spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
    dmu_tx_hold_t *txh;

    while ((txh = list_head(&tx->tx_holds)) != NULL) {
        dnode_t *dn = txh->txh_dnode;

        list_remove(&tx->tx_holds, txh);
        zfs_refcount_destroy_many(&txh->txh_space_towrite,
            zfs_refcount_count(&txh->txh_space_towrite));
        zfs_refcount_destroy_many(&txh->txh_memory_tohold,
            zfs_refcount_count(&txh->txh_memory_tohold));
        kmem_free(txh, sizeof (dmu_tx_hold_t));
        if (dn != NULL)
            dnode_rele(dn, tx);
    }

    list_destroy(&tx->tx_callbacks);
    list_destroy(&tx->tx_holds);
    kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
    ASSERT(tx->tx_txg != 0);

    /*
     * Go through the transaction's hold list and remove holds on
     * associated dnodes, notifying waiters if no holds remain.
     */
    for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
        txh = list_next(&tx->tx_holds, txh)) {
        dnode_t *dn = txh->txh_dnode;

        if (dn == NULL)
            continue;

        mutex_enter(&dn->dn_mtx);
        ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

        if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
            dn->dn_assigned_txg = 0;
            cv_broadcast(&dn->dn_notxholds);
        }
        mutex_exit(&dn->dn_mtx);
    }

    if (tx->tx_tempreserve_cookie)
        dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

    if (!list_is_empty(&tx->tx_callbacks))
        txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

    if (tx->tx_anyobj == FALSE)
        txg_rele_to_sync(&tx->tx_txgh);

    dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
    ASSERT(tx->tx_txg == 0);

    /*
     * Call any registered callbacks with an error code.
     */
    if (!list_is_empty(&tx->tx_callbacks))
        dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));

    dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
    ASSERT(tx->tx_txg != 0);
    return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
    ASSERT(tx->tx_pool != NULL);
    return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
    dmu_tx_callback_t *dcb;

    dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

    dcb->dcb_func = func;
    dcb->dcb_data = data;

    list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
    dmu_tx_callback_t *dcb;

    while ((dcb = list_tail(cb_list)) != NULL) {
        list_remove(cb_list, dcb);
        dcb->dcb_func(dcb->dcb_data, error);
        kmem_free(dcb, sizeof (dmu_tx_callback_t));
    }
}

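/*
 * A minimal sketch of commit-callback usage (the callback and its
 * argument are hypothetical).  The callback runs once the txg commits,
 * or receives ECANCELED if the tx is aborted before it is assigned:
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		if (error == 0)
 *			;	// the change has reached stable storage
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 */
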
/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
    if (!sa->sa_need_attr_registration)
        return;

    for (int i = 0; i != sa->sa_num_attrs; i++) {
        if (!sa->sa_attr_table[i].sa_registered) {
            if (sa->sa_reg_attr_obj)
                dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
                    B_TRUE, sa->sa_attr_table[i].sa_name);
            else
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
                    B_TRUE, sa->sa_attr_table[i].sa_name);
        }
    }
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
    dmu_tx_hold_t *txh;

    txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
        THT_SPILL, 0, 0);
    if (txh != NULL)
        (void) zfs_refcount_add_many(&txh->txh_space_towrite,
            SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
    sa_os_t *sa = tx->tx_objset->os_sa;

    dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

    if (tx->tx_objset->os_sa->sa_master_obj == 0)
        return;

    if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
        dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
    } else {
        dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
        dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
        dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
    }

    dmu_tx_sa_registration_hold(sa, tx);

    if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
        return;

    (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
        THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the total size of the attributes may grow
 * as part of this transaction (e.g. a variable-sized attribute is
 * being added or enlarged).  It is not a statement about the variable
 * sized attributes that already exist on this object.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
    uint64_t object;
    sa_os_t *sa = tx->tx_objset->os_sa;

    ASSERT(hdl != NULL);

    object = sa_handle_object(hdl);

    dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
    DB_DNODE_ENTER(db);
    dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
    DB_DNODE_EXIT(db);

    if (tx->tx_objset->os_sa->sa_master_obj == 0)
        return;

    if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
        tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
        dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
        dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
        dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
    }

    dmu_tx_sa_registration_hold(sa, tx);

    if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
        dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

    if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
        ASSERT(tx->tx_txg == 0);
        dmu_tx_hold_spill(tx, object);
    } else {
        dnode_t *dn;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        if (dn->dn_have_spill) {
            ASSERT(tx->tx_txg == 0);
            dmu_tx_hold_spill(tx, object);
        }
        DB_DNODE_EXIT(db);
    }
}

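/*
 * Illustrative only: an in-place update of fixed-size attributes (e.g.
 * timestamps) holds the handle without growth, while adding or growing
 * a variable-sized attribute passes may_grow ("zp" is a hypothetical
 * znode):
 *
 *	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 */
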
void
dmu_tx_init(void)
{
    dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
        KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
        KSTAT_FLAG_VIRTUAL);

    if (dmu_tx_ksp != NULL) {
        dmu_tx_ksp->ks_data = &dmu_tx_stats;
        kstat_install(dmu_tx_ksp);
    }
}

void
dmu_tx_fini(void)
{
    if (dmu_tx_ksp != NULL) {
        kstat_delete(dmu_tx_ksp);
        dmu_tx_ksp = NULL;
    }
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif