/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;
#if defined(CONFIG_TRACEPOINTS)
STATIC void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	resv;
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
	xfs_log_get_max_trans_res(mp, &resv);
	trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif
/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}
/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
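/*
 * Illustrative sketch, not part of the original file: xfs_trans_dup() is
 * consumed by the transaction roll path at the bottom of this file.  The
 * shape of a dup is "hand the unused reservation to a new transaction, then
 * commit the chunk done so far"; variable names below are hypothetical.
 */
#if 0	/* example only */
	ntp = xfs_trans_dup(tp);		/* ntp inherits the unused blk/rtx reservation */
	error = __xfs_trans_commit(tp, true);	/* commit the old chunk, keep the log ticket */
	tp = ntp;				/* continue logging against the duplicate */
#endif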
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid flag in resp->tr_logflags is XFS_TRANS_PERM_LOG_RES, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int		error = 0;
	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	return error;
}
int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	int			error;

	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}
/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying
 * them -- if the metadata being queried is somehow cross-linked (think a
 * btree block pointer that points higher in the tree), we risk deadlock.
 * However, blocks grabbed as part of a transaction can be re-grabbed.
 * The verifiers will notice the corrupt block and the operation will fail
 * back to userspace without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled
 * without any dirty data.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
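/*
 * Illustrative sketch, not part of the original file: how a read-only
 * metadata query would typically use an empty transaction.  The function
 * name and the elided lookup code are hypothetical; only the
 * alloc_empty/cancel pattern is the point.
 */
#if 0	/* example only */
STATIC int
xfs_example_query(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/* ... read-only lookups that take buffers through tp ... */

	/* nothing was dirtied, so the empty transaction must be cancelled */
	xfs_trans_cancel(tp);
	return 0;
}
#endif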
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
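/*
 * Illustrative sketch, not part of the original file: a growfs-style caller
 * records superblock field changes through xfs_trans_mod_sb() and lets the
 * commit path apply and log them.  The delta variable is hypothetical.
 */
#if 0	/* example only */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, (int64_t)new_dblocks);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)new_dblocks);
	error = xfs_trans_commit(tp);	/* deltas are applied at commit time */
#endif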
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
/*
 * xfs_sb_mod8() and friends apply the given delta to an in-core superblock
 * counter, checking that the counter does not underflow.
 */
STATIC int
xfs_sb_mod8(
	uint8_t			*field,
	int8_t			delta)
{
	int8_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod32(
	uint32_t		*field,
	int32_t			delta)
{
	int32_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod64(
	uint64_t		*field,
	int64_t			delta)
{
	int64_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that it has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				    tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;

out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}
/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}

STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}
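/*
 * Illustrative sketch, not part of the original file: callers rarely use
 * xfs_trans_add_item() directly; the per-item join helpers call it.  Joining
 * and logging an inode follows this pattern (the locking shown is the common
 * case, not something this file requires):
 */
#if 0	/* example only */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);	/* adds the inode log item to tp->t_items */
	/* ... modify the inode core ... */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);	/* marks the joined item dirty */
#endif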
/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	bool			abort)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (abort)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}
STATIC void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}
/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
}
/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}
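/*
 * Illustrative sketch, not part of the original file: whether or not the
 * commit succeeds, the transaction and its items are unlocked and freed, so
 * a caller only checks the return value and must not cancel or touch tp
 * afterwards.
 */
#if 0	/* example only */
	error = xfs_trans_commit(tp);
	if (error)
		return error;	/* items already unlocked, tp already freed */
#endif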
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket)
		xfs_log_done(mp, tp->t_ticket, NULL, false);

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
	xfs_trans_free(tp);
}
/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want as soon
 * as possible to let chunks of it go to the log. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}