// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_log_priv.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"

kmem_zone_t	*xfs_trans_zone;
#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
        struct xfs_mount        *mp)
{
        struct xfs_trans_res    resv;
        struct xfs_trans_res    *res;
        struct xfs_trans_res    *end_res;
        int                     i;

        res = (struct xfs_trans_res *)M_RES(mp);
        end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
        for (i = 0; res < end_res; i++, res++)
                trace_xfs_trans_resv_calc(mp, i, res);
        xfs_log_get_max_trans_res(mp, &resv);
        trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif
/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
        struct xfs_mount        *mp)
{
        xfs_trans_resv_calc(mp, M_RES(mp));
        xfs_trans_trace_reservations(mp);
}
/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
        struct xfs_trans        *tp)
{
        xfs_extent_busy_sort(&tp->t_busy);
        xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

        trace_xfs_trans_free(tp, _RET_IP_);
        atomic_dec(&tp->t_mountp->m_active_trans);
        if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
                sb_end_intwrite(tp->t_mountp->m_super);
        xfs_trans_free_dqinfo(tp);
        kmem_cache_free(xfs_trans_zone, tp);
}
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
        struct xfs_trans        *tp)
{
        struct xfs_trans        *ntp;

        trace_xfs_trans_dup(tp, _RET_IP_);

        ntp = kmem_zone_zalloc(xfs_trans_zone, 0);

        /*
         * Initialize the new transaction structure.
         */
        ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
        ntp->t_mountp = tp->t_mountp;
        INIT_LIST_HEAD(&ntp->t_items);
        INIT_LIST_HEAD(&ntp->t_busy);
        INIT_LIST_HEAD(&ntp->t_dfops);
        ntp->t_firstblock = NULLFSBLOCK;

        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(tp->t_ticket != NULL);

        ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
                       (tp->t_flags & XFS_TRANS_RESERVE) |
                       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
        /* We gave our writer reference to the new transaction */
        tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
        ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

        ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
        ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
        tp->t_blk_res = tp->t_blk_res_used;

        ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
        tp->t_rtx_res = tp->t_rtx_res_used;
        ntp->t_pflags = tp->t_pflags;

        /* move deferred ops over to the new tp */
        xfs_defer_move(ntp, tp);

        xfs_trans_dup_dqinfo(tp, ntp);

        atomic_inc(&tp->t_mountp->m_active_trans);
        return ntp;
}
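
/*
 * Worked example of the reservation handoff above (illustrative numbers,
 * not taken from any particular caller): if the old transaction reserved
 * t_blk_res = 20 blocks and has used t_blk_res_used = 8 of them, the
 * duplicate inherits the 20 - 8 = 12 unused blocks and the old
 * transaction's reservation is trimmed down to the 8 it has already
 * consumed, so no blocks are double-counted across the handoff.
 */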
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
        struct xfs_trans        *tp,
        struct xfs_trans_res    *resp,
        uint                    blocks,
        uint                    rtextents)
{
        struct xfs_mount        *mp = tp->t_mountp;
        int                     error = 0;
        bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

        /* Mark this thread as being in a transaction */
        current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

        /*
         * Attempt to reserve the needed disk blocks by decrementing
         * the number needed from the number available.  This will
         * fail if the count would go below zero.
         */
        if (blocks > 0) {
                error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
                if (error != 0) {
                        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
                        return -ENOSPC;
                }
                tp->t_blk_res += blocks;
        }

        /*
         * Reserve the log space needed for this transaction.
         */
        if (resp->tr_logres > 0) {
                bool    permanent = false;

                ASSERT(tp->t_log_res == 0 ||
                       tp->t_log_res == resp->tr_logres);
                ASSERT(tp->t_log_count == 0 ||
                       tp->t_log_count == resp->tr_logcount);

                if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
                        tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
                        permanent = true;
                } else {
                        ASSERT(tp->t_ticket == NULL);
                        ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
                }

                if (tp->t_ticket != NULL) {
                        ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
                        error = xfs_log_regrant(mp, tp->t_ticket);
                } else {
                        error = xfs_log_reserve(mp,
                                                resp->tr_logres,
                                                resp->tr_logcount,
                                                &tp->t_ticket, XFS_TRANSACTION,
                                                permanent);
                }

                if (error)
                        goto undo_blocks;

                tp->t_log_res = resp->tr_logres;
                tp->t_log_count = resp->tr_logcount;
        }

        /*
         * Attempt to reserve the needed realtime extents by decrementing
         * the number needed from the number available.  This will
         * fail if the count would go below zero.
         */
        if (rtextents > 0) {
                error = xfs_mod_frextents(mp, -((int64_t)rtextents));
                if (error) {
                        error = -ENOSPC;
                        goto undo_log;
                }
                tp->t_rtx_res += rtextents;
        }

        return 0;

        /*
         * Error cases jump to one of these labels to undo any
         * reservations which have already been performed.
         */
undo_log:
        if (resp->tr_logres > 0) {
                xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
                tp->t_ticket = NULL;
                tp->t_log_res = 0;
                tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
        }

undo_blocks:
        if (blocks > 0) {
                xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
                tp->t_blk_res = 0;
        }

        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

        return error;
}
int
xfs_trans_alloc(
        struct xfs_mount        *mp,
        struct xfs_trans_res    *resp,
        uint                    blocks,
        uint                    rtextents,
        uint                    flags,
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *tp;
        int                     error;

        /*
         * Allocate the handle before we do our freeze accounting and setting up
         * GFP_NOFS allocation context so that we avoid lockdep false positives
         * by doing GFP_KERNEL allocations inside sb_start_intwrite().
         */
        tp = kmem_zone_zalloc(xfs_trans_zone, 0);
        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);

        /*
         * Zero-reservation ("empty") transactions can't modify anything, so
         * they're allowed to run while we're frozen.
         */
        WARN_ON(resp->tr_logres > 0 &&
                mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);

        tp->t_magic = XFS_TRANS_HEADER_MAGIC;
        tp->t_flags = flags;
        tp->t_mountp = mp;
        INIT_LIST_HEAD(&tp->t_items);
        INIT_LIST_HEAD(&tp->t_busy);
        INIT_LIST_HEAD(&tp->t_dfops);
        tp->t_firstblock = NULLFSBLOCK;

        error = xfs_trans_reserve(tp, resp, blocks, rtextents);
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        trace_xfs_trans_alloc(tp, _RET_IP_);

        *tpp = tp;
        return 0;
}
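
/*
 * Illustrative usage sketch (not part of this file): a typical caller
 * allocates a transaction with a precomputed reservation, joins and logs
 * its objects, and then commits or cancels.  The inode calls below are a
 * hedged sketch of the common pattern, not a verbatim excerpt of any
 * specific caller.
 *
 *      struct xfs_trans        *tp;
 *      int                     error;
 *
 *      error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *      if (error)
 *              return error;
 *      xfs_ilock(ip, XFS_ILOCK_EXCL);
 *      xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *      ... modify ip while it is joined to the transaction ...
 *      xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *      error = xfs_trans_commit(tp);
 */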
/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying
 * them -- if the metadata being queried is somehow cross-linked (think a
 * btree block pointer that points higher in the tree), we risk deadlock.
 * However, blocks grabbed as part of a transaction can be re-grabbed.
 * The verifiers will notice the corrupt block and the operation will fail
 * back to userspace without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled
 * without any dirty data.
 *
 * Callers should obtain freeze protection to avoid two conflicts with fs
 * freezing: (1) having active transactions trip the m_active_trans ASSERTs;
 * and (2) grabbing buffers at the same time that freeze is trying to drain
 * the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
        struct xfs_mount        *mp,
        struct xfs_trans        **tpp)
{
        struct xfs_trans_res    resv = {0};

        return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
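
/*
 * Illustrative usage sketch (not part of this file): a read-only metadata
 * scan can use an empty transaction purely so that buffers it reads can be
 * re-grabbed without deadlocking.  Since nothing may be dirtied, the
 * transaction is always cancelled, never committed (hypothetical caller):
 *
 *      error = xfs_trans_alloc_empty(mp, &tp);
 *      if (error)
 *              return error;
 *      ... read-only metadata lookups using tp ...
 *      xfs_trans_cancel(tp);
 */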
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
        xfs_trans_t     *tp,
        uint            field,
        int64_t         delta)
{
        uint32_t        flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
        xfs_mount_t     *mp = tp->t_mountp;

        switch (field) {
        case XFS_TRANS_SB_ICOUNT:
                tp->t_icount_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_IFREE:
                tp->t_ifree_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FDBLOCKS:
                /*
                 * Track the number of blocks allocated in the transaction.
                 * Make sure it does not exceed the number reserved. If so,
                 * shutdown as this can lead to accounting inconsistency.
                 */
                if (delta < 0) {
                        tp->t_blk_res_used += (uint)-delta;
                        if (tp->t_blk_res_used > tp->t_blk_res)
                                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                }
                tp->t_fdblocks_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_RES_FDBLOCKS:
                /*
                 * The allocation has already been applied to the
                 * in-core superblock's counter.  This should only
                 * be applied to the on-disk superblock.
                 */
                tp->t_res_fdblocks_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FREXTENTS:
                /*
                 * Track the number of blocks allocated in the
                 * transaction.  Make sure it does not exceed the
                 * number reserved.
                 */
                if (delta < 0) {
                        tp->t_rtx_res_used += (uint)-delta;
                        ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
                }
                tp->t_frextents_delta += delta;
                break;
        case XFS_TRANS_SB_RES_FREXTENTS:
                /*
                 * The allocation has already been applied to the
                 * in-core superblock's counter.  This should only
                 * be applied to the on-disk superblock.
                 */
                ASSERT(delta < 0);
                tp->t_res_frextents_delta += delta;
                break;
        case XFS_TRANS_SB_DBLOCKS:
                ASSERT(delta > 0);
                tp->t_dblocks_delta += delta;
                break;
        case XFS_TRANS_SB_AGCOUNT:
                ASSERT(delta > 0);
                tp->t_agcount_delta += delta;
                break;
        case XFS_TRANS_SB_IMAXPCT:
                tp->t_imaxpct_delta += delta;
                break;
        case XFS_TRANS_SB_REXTSIZE:
                tp->t_rextsize_delta += delta;
                break;
        case XFS_TRANS_SB_RBMBLOCKS:
                tp->t_rbmblocks_delta += delta;
                break;
        case XFS_TRANS_SB_RBLOCKS:
                tp->t_rblocks_delta += delta;
                break;
        case XFS_TRANS_SB_REXTENTS:
                tp->t_rextents_delta += delta;
                break;
        case XFS_TRANS_SB_REXTSLOG:
                tp->t_rextslog_delta += delta;
                break;
        default:
                ASSERT(0);
                return;
        }

        tp->t_flags |= flags;
}
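
/*
 * Illustrative usage sketch (not part of this file): a caller that has just
 * allocated len filesystem blocks out of its block reservation records the
 * change as
 *
 *      xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *
 * With lazy superblock counters the delta still reaches the incore
 * superblock at commit time, but XFS_TRANS_SB_DIRTY is cleared above so the
 * on-disk superblock buffer is not logged just for this field.
 */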
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
        xfs_trans_t     *tp)
{
        xfs_dsb_t       *sbp;
        xfs_buf_t       *bp;
        int             whole = 0;

        bp = xfs_trans_getsb(tp, tp->t_mountp);
        sbp = bp->b_addr;

        /*
         * Check that superblock mods match the mods made to AGF counters.
         */
        ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
               (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
                tp->t_ag_btree_delta));

        /*
         * Only update the superblock counters if we are logging them
         */
        if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
                if (tp->t_icount_delta)
                        be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
                if (tp->t_ifree_delta)
                        be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
                if (tp->t_fdblocks_delta)
                        be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
                if (tp->t_res_fdblocks_delta)
                        be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
        }

        if (tp->t_frextents_delta)
                be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
        if (tp->t_res_frextents_delta)
                be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

        if (tp->t_dblocks_delta) {
                be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
                whole = 1;
        }
        if (tp->t_agcount_delta) {
                be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
                whole = 1;
        }
        if (tp->t_imaxpct_delta) {
                sbp->sb_imax_pct += tp->t_imaxpct_delta;
                whole = 1;
        }
        if (tp->t_rextsize_delta) {
                be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
                whole = 1;
        }
        if (tp->t_rbmblocks_delta) {
                be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
                whole = 1;
        }
        if (tp->t_rblocks_delta) {
                be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
                whole = 1;
        }
        if (tp->t_rextents_delta) {
                be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
                whole = 1;
        }
        if (tp->t_rextslog_delta) {
                sbp->sb_rextslog += tp->t_rextslog_delta;
                whole = 1;
        }

        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
        if (whole)
                /*
                 * Log the whole thing, the fields are noncontiguous.
                 */
                xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
        else
                /*
                 * Since all the modifiable fields are contiguous, we
                 * can get away with this.
                 */
                xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
                                  offsetof(xfs_dsb_t, sb_frextents) +
                                  sizeof(sbp->sb_frextents) - 1);
}
STATIC int
xfs_sb_mod8(
        uint8_t                 *field,
        int8_t                  delta)
{
        int8_t                  counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}

STATIC int
xfs_sb_mod32(
        uint32_t                *field,
        int32_t                 delta)
{
        int32_t                 counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}

STATIC int
xfs_sb_mod64(
        uint64_t                *field,
        int64_t                 delta)
{
        int64_t                 counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}
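
/*
 * Worked example for the xfs_sb_mod*() helpers above (illustrative values):
 * applying delta = -9 to a counter currently holding 5 would produce -4, so
 * the helper asserts, returns -EINVAL and leaves the field unchanged;
 * applying delta = -3 instead stores 2 and returns 0.  The signed local
 * copy is what makes underflow detectable on these unsigned fields.
 */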
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that that has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
        int64_t                 blkdelta = 0;
        int64_t                 rtxdelta = 0;
        int64_t                 idelta = 0;
        int64_t                 ifreedelta = 0;
        int                     error;

        /* calculate deltas */
        if (tp->t_blk_res > 0)
                blkdelta = tp->t_blk_res;
        if ((tp->t_fdblocks_delta != 0) &&
            (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
             (tp->t_flags & XFS_TRANS_SB_DIRTY)))
                blkdelta += tp->t_fdblocks_delta;

        if (tp->t_rtx_res > 0)
                rtxdelta = tp->t_rtx_res;
        if ((tp->t_frextents_delta != 0) &&
            (tp->t_flags & XFS_TRANS_SB_DIRTY))
                rtxdelta += tp->t_frextents_delta;

        if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
            (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
                idelta = tp->t_icount_delta;
                ifreedelta = tp->t_ifree_delta;
        }

        /* apply the per-cpu counters */
        if (blkdelta) {
                error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
                if (error)
                        goto out;
        }

        if (idelta) {
                error = xfs_mod_icount(mp, idelta);
                if (error)
                        goto out_undo_fdblocks;
        }

        if (ifreedelta) {
                error = xfs_mod_ifree(mp, ifreedelta);
                if (error)
                        goto out_undo_icount;
        }

        if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
                return;

        /* apply remaining deltas */
        spin_lock(&mp->m_sb_lock);
        if (rtxdelta) {
                error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
                if (error)
                        goto out_undo_ifree;
        }

        if (tp->t_dblocks_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
                if (error)
                        goto out_undo_frextents;
        }
        if (tp->t_agcount_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
                if (error)
                        goto out_undo_dblocks;
        }
        if (tp->t_imaxpct_delta != 0) {
                error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
                if (error)
                        goto out_undo_agcount;
        }
        if (tp->t_rextsize_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
                                     tp->t_rextsize_delta);
                if (error)
                        goto out_undo_imaxpct;
        }
        if (tp->t_rbmblocks_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
                                     tp->t_rbmblocks_delta);
                if (error)
                        goto out_undo_rextsize;
        }
        if (tp->t_rblocks_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
                if (error)
                        goto out_undo_rbmblocks;
        }
        if (tp->t_rextents_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
                                     tp->t_rextents_delta);
                if (error)
                        goto out_undo_rblocks;
        }
        if (tp->t_rextslog_delta != 0) {
                error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
                                    tp->t_rextslog_delta);
                if (error)
                        goto out_undo_rextents;
        }
        spin_unlock(&mp->m_sb_lock);
        return;

out_undo_rextents:
        if (tp->t_rextents_delta)
                xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
        if (tp->t_rblocks_delta)
                xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
        if (tp->t_rbmblocks_delta)
                xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
        if (tp->t_rextsize_delta)
                xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
        if (tp->t_imaxpct_delta)
                xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
        if (tp->t_agcount_delta)
                xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
        if (tp->t_dblocks_delta)
                xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
        if (rtxdelta)
                xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
        spin_unlock(&mp->m_sb_lock);
        if (ifreedelta)
                xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
        if (idelta)
                xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
        if (blkdelta)
                xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
        ASSERT(error == 0);
        return;
}
/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
        struct xfs_trans        *tp,
        struct xfs_log_item     *lip)
{
        ASSERT(lip->li_mountp == tp->t_mountp);
        ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
        ASSERT(list_empty(&lip->li_trans));
        ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

        list_add_tail(&lip->li_trans, &tp->t_items);
        trace_xfs_trans_add_item(tp, _RET_IP_);
}
/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
        struct xfs_log_item     *lip)
{
        clear_bit(XFS_LI_DIRTY, &lip->li_flags);
        list_del_init(&lip->li_trans);
}
/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
        struct xfs_trans        *tp,
        bool                    abort)
{
        struct xfs_log_item     *lip, *next;

        trace_xfs_trans_free_items(tp, _RET_IP_);

        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
                xfs_trans_del_item(lip);
                if (abort)
                        set_bit(XFS_LI_ABORTED, &lip->li_flags);
                if (lip->li_ops->iop_release)
                        lip->li_ops->iop_release(lip);
        }
}
static inline void
xfs_log_item_batch_insert(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               commit_lsn)
{
        int     i;

        spin_lock(&ailp->ail_lock);
        /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
        xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];

                if (lip->li_ops->iop_unpin)
                        lip->li_ops->iop_unpin(lip, 0);
        }
}
/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
        struct xfs_ail          *ailp,
        struct xfs_log_vec      *log_vector,
        xfs_lsn_t               commit_lsn,
        bool                    aborted)
{
#define LOG_ITEM_BATCH_SIZE     32
        struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
        struct xfs_log_vec      *lv;
        struct xfs_ail_cursor   cur;
        int                     i = 0;

        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
        spin_unlock(&ailp->ail_lock);

        /* unpin all the log items */
        for (lv = log_vector; lv; lv = lv->lv_next) {
                struct xfs_log_item     *lip = lv->lv_item;
                xfs_lsn_t               item_lsn;

                if (aborted)
                        set_bit(XFS_LI_ABORTED, &lip->li_flags);

                if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
                        lip->li_ops->iop_release(lip);
                        continue;
                }

                if (lip->li_ops->iop_committed)
                        item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
                else
                        item_lsn = commit_lsn;

                /* item_lsn of -1 means the item needs no further processing */
                if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                        continue;

                /*
                 * if we are aborting the operation, no point in inserting the
                 * object into the AIL as we are in a shutdown situation.
                 */
                if (aborted) {
                        ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
                        if (lip->li_ops->iop_unpin)
                                lip->li_ops->iop_unpin(lip, 1);
                        continue;
                }

                if (item_lsn != commit_lsn) {

                        /*
                         * Not a bulk update option due to unusual item_lsn.
                         * Push into AIL immediately, rechecking the lsn once
                         * we have the ail lock. Then unpin the item. This does
                         * not affect the AIL cursor the bulk insert path is
                         * using.
                         */
                        spin_lock(&ailp->ail_lock);
                        if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
                                xfs_trans_ail_update(ailp, lip, item_lsn);
                        else
                                spin_unlock(&ailp->ail_lock);
                        if (lip->li_ops->iop_unpin)
                                lip->li_ops->iop_unpin(lip, 0);
                        continue;
                }

                /* Item is a candidate for bulk AIL insert.  */
                log_items[i++] = lv->lv_item;
                if (i >= LOG_ITEM_BATCH_SIZE) {
                        xfs_log_item_batch_insert(ailp, &cur, log_items,
                                        LOG_ITEM_BATCH_SIZE, commit_lsn);
                        i = 0;
                }
        }

        /* make sure we insert the remainder! */
        if (i)
                xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->ail_lock);
}
/*
 * Commit the given transaction to the log.
 *
 * XFS's disk error handling is not based on a typical transaction abort
 * mechanism.  Logically, after the filesystem gets marked 'SHUTDOWN' we
 * can't let any new transactions be durable - i.e. committed to disk -
 * because some metadata might be inconsistent.  In such cases this returns
 * an error, and the caller may assume that all locked objects joined to
 * the transaction have already been unlocked as if the commit had
 * succeeded.  Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
        struct xfs_trans        *tp,
        bool                    regrant)
{
        struct xfs_mount        *mp = tp->t_mountp;
        xfs_lsn_t               commit_lsn = -1;
        int                     error = 0;
        int                     sync = tp->t_flags & XFS_TRANS_SYNC;

        trace_xfs_trans_commit(tp, _RET_IP_);

        /*
         * Finish deferred items on final commit. Only permanent transactions
         * should ever have deferred ops.
         */
        WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
                     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
        if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
                error = xfs_defer_finish_noroll(&tp);
                if (error)
                        goto out_unreserve;
        }

        /*
         * If there is nothing to be logged by the transaction,
         * then unlock all of the items associated with the
         * transaction and free the transaction structure.
         * Also make sure to return any reserved blocks to
         * the free pool.
         */
        if (!(tp->t_flags & XFS_TRANS_DIRTY))
                goto out_unreserve;

        if (XFS_FORCED_SHUTDOWN(mp)) {
                error = -EIO;
                goto out_unreserve;
        }

        ASSERT(tp->t_ticket != NULL);

        /*
         * If we need to update the superblock, then do it now.
         */
        if (tp->t_flags & XFS_TRANS_SB_DIRTY)
                xfs_trans_apply_sb_deltas(tp);
        xfs_trans_apply_dquot_deltas(tp);

        xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
        xfs_trans_free(tp);

        /*
         * If the transaction needs to be synchronous, then force the
         * log out now and wait for it.
         */
        if (sync) {
                error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
                XFS_STATS_INC(mp, xs_trans_sync);
        } else {
                XFS_STATS_INC(mp, xs_trans_async);
        }

        return error;

out_unreserve:
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * It is indeed possible for the transaction to be not dirty but
         * the dqinfo portion to be.  All that means is that we have some
         * (non-persistent) quota reservations that need to be unreserved.
         */
        xfs_trans_unreserve_and_mod_dquots(tp);
        if (tp->t_ticket) {
                if (regrant && !XLOG_FORCED_SHUTDOWN(mp->m_log))
                        xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
                else
                        xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
                tp->t_ticket = NULL;
        }
        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
        xfs_trans_free_items(tp, !!error);
        xfs_trans_free(tp);

        XFS_STATS_INC(mp, xs_trans_empty);
        return error;
}
int
xfs_trans_commit(
        struct xfs_trans        *tp)
{
        return __xfs_trans_commit(tp, false);
}
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        bool                    dirty = (tp->t_flags & XFS_TRANS_DIRTY);

        trace_xfs_trans_cancel(tp, _RET_IP_);

        if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
                xfs_defer_cancel(tp);

        /*
         * See if the caller is relying on us to shut down the
         * filesystem.  This happens in paths where we detect
         * corruption and decide to give up.
         */
        if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
                XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        }
#ifdef DEBUG
        if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
                struct xfs_log_item *lip;

                list_for_each_entry(lip, &tp->t_items, li_trans)
                        ASSERT(!(lip->li_type == XFS_LI_EFD));
        }
#endif
        xfs_trans_unreserve_and_mod_sb(tp);
        xfs_trans_unreserve_and_mod_dquots(tp);

        if (tp->t_ticket) {
                xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
                tp->t_ticket = NULL;
        }

        /* mark this thread as no longer being in a transaction */
        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

        xfs_trans_free_items(tp, dirty);
        xfs_trans_free(tp);
}
/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible.  So we commit the chunk
 * we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *trans = *tpp;
        struct xfs_trans_res    tres;
        int                     error;

        trace_xfs_trans_roll(trans, _RET_IP_);

        /*
         * Copy the critical parameters from one trans to the next.
         */
        tres.tr_logres = trans->t_log_res;
        tres.tr_logcount = trans->t_log_count;

        *tpp = xfs_trans_dup(trans);

        /*
         * Commit the current transaction.
         * If this commit failed, then it'd just unlock those items that
         * are not marked ihold. That also means that a filesystem shutdown
         * is in progress. The caller takes the responsibility to cancel
         * the duplicate transaction that gets returned.
         */
        error = __xfs_trans_commit(trans, true);
        if (error)
                return error;

        /*
         * Reserve space in the log for the next transaction.
         * This also pushes items in the "AIL", the list of logged items,
         * out to disk if they are taking up space at the tail of the log
         * that we want to use.  This requires that either nothing be locked
         * across this call, or that anything that is locked be logged in
         * the prior and the next transactions.
         */
        tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
        return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
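
/*
 * Illustrative usage sketch (not part of this file): long-running operations
 * hold a permanent log reservation and roll the transaction between steps so
 * the tail of the log can keep moving.  A hypothetical caller working on a
 * joined inode might look roughly like:
 *
 *      error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *      ...
 *      while (!done) {
 *              ... dirty some items for this chunk of work ...
 *              error = xfs_trans_roll_inode(&tp, ip);
 *              if (error)
 *                      goto out_cancel;
 *      }
 *      error = xfs_trans_commit(tp);
 */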