/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/zfs_context.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);

	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os->os_dsl_dataset->ds_dir);

	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os->os_dsl_dataset);

	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os->os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;

	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			if ((off > 0 || len < dn->dn_datablksz) &&
			    off < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (dn->dn_nlevels > 1) {
				start >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				end >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = start+1; i < end; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
		}
	}

	/*
	 * If there's more than one block, the blocksize can't change,
	 * so we can make a more precise estimate.  Alternatively,
	 * if the dnode's ibs is larger than max_ibs, always use that.
	 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
	 * the code will still work correctly on existing pools.
	 */
	if (dn && (dn->dn_maxblkid != 0 || dn->dn_indblkshift > max_ibs)) {
		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_datablkshift != 0)
			min_bs = max_bs = dn->dn_datablkshift;
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		/*
		 * If we increase the number of levels of indirection,
		 * we'll need new blkid=0 indirect blocks.  If start == 0,
		 * we're already accounting for those blocks; and if end == 0,
		 * we can't increase the number of levels beyond that.
		 */
		if (start != 0 && end != 0)
			txh->txh_space_towrite += 1ULL << max_ibs;
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
	}

	ASSERT(txh->txh_space_towrite < 2 * DMU_MAX_ACCESS);

out:
	if (err)
		txh->txh_tx->tx_err = err;
}
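
/*
 * As a rough illustration of the estimate above (assuming, for the sake
 * of the example, 128K data blocks so max_bs = 17, and 16K indirect
 * blocks so max_ibs = 14 and epbs = 7): a 1MB write at offset 0 is
 * charged 1MB of level-0 data, plus one 16K indirect block on each of
 * the seven iterations of the loop, i.e. roughly 1.1MB of
 * txh_space_towrite in the worst case.
 */
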
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = txh->txh_tx->tx_objset->os->os_meta_dnode;
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_level() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dasize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);

		txh->txh_memory_tohold += dbuf->db.db_size;
		if (txh->txh_memory_tohold > DMU_MAX_ACCESS) {
			txh->txh_tx->tx_err = E2BIG;
			dbuf_rele(dbuf, FTAG);
			break;
		}
		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dasize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_dnode(txh);
	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    dn->dn_phys->dn_blkptr[0].blk_birth)) {
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		} else {
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
			txh->txh_space_tounref +=
			    BP_GET_ASIZE(dn->dn_phys->dn_blkptr);
		}
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(&dn->dn_objset->os, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	/*
	 * 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * 3 new blocks written if adding: new split leaf, 2 grown ptrtbl blocks
	 */
	dmu_tx_count_write(txh, dn->dn_maxblkid * dn->dn_datablksz,
	    (3 + (add ? 3 : 0)) << dn->dn_datablkshift);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		txh->txh_space_towrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn = db->db_dnode;

	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset->os);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj)
		return;

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT)
		return;

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * buffer so that we don't need to hold it
				 * when creating a new object.
				 */
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset)
			return;
	}
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly.
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
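
/*
 * A sketch of the typical TXG_NOWAIT calling sequence (illustrative
 * only; the hold and write calls shown are placeholders for whatever
 * the caller actually intends to modify):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		dmu_tx_wait(tx);	(drop any locks first)
 *		dmu_tx_abort(tx);
 *		goto top;		(retry from dmu_tx_create())
 *	} else if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */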
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);
	list_destroy(&tx->tx_holds);

	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));

	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}
	list_destroy(&tx->tx_holds);

	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));

	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}