Linux 6.14-rc2: fs/xfs/xfs_refcount_item.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_rtgroup.h"

struct kmem_cache	*xfs_cui_cache;
struct kmem_cache	*xfs_cud_cache;
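
/*
 * A CUI ("refcount update intent") is logged before we start changing
 * reference counts; the matching CUD ("refcount update done") is logged once
 * those changes commit.  If the log contains a CUI with no matching CUD,
 * recovery replays the pending updates from the intent.
 */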
static const struct xfs_item_ops xfs_cui_item_ops;

static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}
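
/*
 * Free a CUI, matching how xfs_cui_init() allocated it: large items come
 * straight from the heap, small ones from the xfs_cui_cache.
 */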
STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	kvfree(cuip->cui_item.li_lv_shadow);
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kfree(cuip);
	else
		kmem_cache_free(xfs_cui_cache, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (!atomic_dec_and_test(&cuip->cui_refcount))
		return;

	xfs_trans_ail_delete(&cuip->cui_item, 0);
	xfs_cui_item_free(cuip);
}
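
/*
 * Report the number of iovecs and bytes needed to log the given CUI item;
 * a single iovec covering the variable-length cui_format suffices.
 */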
STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);
	ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);

	cuip->cui_format.cui_type = lip->li_type;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the CUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the CUI to either construct
 * and commit the CUD or drop the CUD's reference in the event of error. Simply
 * drop the log's CUI reference now that the log is done with it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a CUI item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	unsigned short			item_type,
	uint				nextents)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	ASSERT(item_type == XFS_LI_CUI || item_type == XFS_LI_CUI_RT);

	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kzalloc(xfs_cui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		cuip = kmem_cache_zalloc(xfs_cui_cache,
				GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, item_type, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
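	/*
	 * The caller gets two references: one dropped when the log is done
	 * with the item (unpin or abort), and one dropped when the matching
	 * CUD is processed or the transaction is cancelled.
	 */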
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}

static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}
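
/* A CUD needs just one iovec covering the fixed-size cud_format. */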
STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(lip->li_type == XFS_LI_CUD || lip->li_type == XFS_LI_CUD_RT);

	cudp->cud_format.cud_type = lip->li_type;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kvfree(cudp->cud_item.li_lv_shadow);
	kmem_cache_free(xfs_cud_cache, cudp);
}

static struct xfs_log_item *
xfs_cud_item_intent(
	struct xfs_log_item	*lip)
{
	return &CUD_ITEM(lip)->cud_cuip->cui_item;
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
	.iop_intent	= xfs_cud_item_intent,
};

static inline struct xfs_refcount_intent *ci_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_refcount_intent, ri_list);
}

static inline bool
xfs_cui_item_isrt(const struct xfs_log_item *lip)
{
	ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);

	return lip->li_type == XFS_LI_CUI_RT;
}

/* Sort refcount intents by AG. */
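/*
 * Ascending group order matches the order in which per-group structures are
 * locked elsewhere in XFS, which avoids deadlocks when one transaction
 * finishes updates spanning several groups.
 */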
static int
xfs_refcount_update_diff_items(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_refcount_intent	*ra = ci_entry(a);
	struct xfs_refcount_intent	*rb = ci_entry(b);

	return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*ri)
{
	uint				next_extent;
	struct xfs_phys_extent		*pmap;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	pmap = &cuip->cui_format.cui_extents[next_extent];
	pmap->pe_startblock = ri->ri_startblock;
	pmap->pe_len = ri->ri_blockcount;

	pmap->pe_flags = 0;
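	/* For now pe_flags carries nothing but the update type. */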
	switch (ri->ri_type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		pmap->pe_flags |= ri->ri_type;
		break;
	default:
		ASSERT(0);
	}
}

static struct xfs_log_item *
__xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort,
	unsigned short			item_type)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_refcount_intent	*ri;

	ASSERT(count > 0);

	cuip = xfs_cui_init(mp, item_type, count);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, ri);
	return &cuip->cui_item;
}

static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return __xfs_refcount_update_create_intent(tp, items, count, sort,
			XFS_LI_CUI);
}

static inline unsigned short
xfs_cud_type_from_cui(const struct xfs_cui_log_item *cuip)
{
	return xfs_cui_item_isrt(&cuip->cui_item) ? XFS_LI_CUD_RT : XFS_LI_CUD;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_cui_log_item		*cuip = CUI_ITEM(intent);
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item,
			  xfs_cud_type_from_cui(cuip), &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	return &cudp->cud_item;
}

/* Add this deferred CUI to the transaction. */
void
xfs_refcount_defer_add(
	struct xfs_trans		*tp,
	struct xfs_refcount_intent	*ri)
{
	struct xfs_mount		*mp = tp->t_mountp;

	/*
	 * Deferred refcount updates for the realtime and data sections must
	 * use separate transactions to finish deferred work because updates to
	 * realtime metadata files can lock AGFs to allocate btree blocks and
	 * we don't want that mixing with the AGF locks taken to finish data
	 * section updates.
	 */
	ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock,
			ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);

	trace_xfs_refcount_defer(mp, ri);
	xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
			&xfs_rtrefcount_update_defer_type :
			&xfs_refcount_update_defer_type);
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);

	xfs_group_intent_put(ri->ri_group);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);
	int				error;

	/* Did we run out of reservation? Requeue what we didn't finish. */
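	/*
	 * A nonzero ri_blockcount after a successful return means the
	 * transaction ran low on reservation; -EAGAIN asks the deferred-ops
	 * machinery to roll the transaction and retry the remainder.
	 */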
	error = xfs_refcount_finish_one(tp, ri, state);
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	xfs_refcount_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_refcount_finish_one. */
STATIC void
xfs_refcount_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp;

	if (rcur == NULL)
		return;
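	/*
	 * Tear down the cursor that was cached in *state across finish_item
	 * calls, releasing its hold on the AGF buffer if the update failed.
	 */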
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error && agbp)
		xfs_trans_brelse(tp, agbp);
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Is this recovered CUI ok? */
static inline bool
xfs_cui_validate_phys(
	struct xfs_mount		*mp,
	bool				isrt,
	struct xfs_phys_extent		*pmap)
{
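	/* Refcount intents can only exist on reflink filesystems. */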
	if (!xfs_has_reflink(mp))
		return false;

	if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
		return false;

	switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		break;
	default:
		return false;
	}

	if (isrt)
		return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len);

	return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}

static inline void
xfs_cui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	bool				isrt,
	struct xfs_phys_extent		*pmap)
{
	struct xfs_refcount_intent	*ri;

	ri = kmem_cache_alloc(xfs_refcount_intent_cache,
			GFP_KERNEL | __GFP_NOFAIL);
	ri->ri_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
	ri->ri_startblock = pmap->pe_startblock;
	ri->ri_blockcount = pmap->pe_len;
	ri->ri_group = xfs_group_intent_get(mp, pmap->pe_startblock,
			isrt ? XG_TYPE_RTG : XG_TYPE_AG);
	ri->ri_realtime = isrt;

	xfs_defer_add_item(dfp, &ri->ri_list);
}

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_refcount_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	bool				isrt = xfs_cui_item_isrt(lip);
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI.  If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		if (!xfs_cui_validate_phys(mp, isrt,
					&cuip->cui_format.cui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			return -EFSCORRUPTED;
		}

		xfs_cui_recover_work(mp, dfp, isrt,
				&cuip->cui_format.cui_extents[i]);
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction.  All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction.  Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update.  However, we're in log recovery here, so we
	 * use the passed in defer_ops to finish up any work that
	 * doesn't fit.  We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&cuip->cui_format,
				sizeof(cuip->cui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_refcount_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*pmap;
	unsigned int			count;

	ASSERT(intent->li_type == XFS_LI_CUI ||
	       intent->li_type == XFS_LI_CUI_RT);

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	pmap = CUI_ITEM(intent)->cui_format.cui_extents;

	cuip = xfs_cui_init(tp->t_mountp, intent->li_type, count);
	memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
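	/* The relogged CUI starts out fully populated, so mark every extent
	 * slot used. */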
	atomic_set(&cuip->cui_next_extent, count);

	return &cuip->cui_item;
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.name		= "refcount",
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
	.recover_work	= xfs_refcount_recover_work,
	.relog_intent	= xfs_refcount_relog_intent,
};

#ifdef CONFIG_XFS_RT
static struct xfs_log_item *
xfs_rtrefcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return __xfs_refcount_update_create_intent(tp, items, count, sort,
			XFS_LI_CUI_RT);
}

/* Process a deferred realtime refcount update. */
STATIC int
xfs_rtrefcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);
	int				error;

	error = xfs_rtrefcount_finish_one(tp, ri, state);

	/* Did we run out of reservation? Requeue what we didn't finish. */
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	xfs_refcount_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_rtrefcount_finish_one. */
STATIC void
xfs_rtrefcount_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	if (rcur)
		xfs_btree_del_cursor(rcur, error);
}

const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
	.name		= "rtrefcount",
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rtrefcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_rtrefcount_update_finish_item,
	.finish_cleanup	= xfs_rtrefcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
	.recover_work	= xfs_refcount_recover_work,
	.relog_intent	= xfs_refcount_relog_intent,
};
#else
const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
	.name		= "rtrefcount",
};
#endif /* CONFIG_XFS_RT */

STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_match	= xfs_cui_item_match,
};

static inline void
xfs_cui_copy_format(
	struct xfs_cui_log_format	*dst,
	const struct xfs_cui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));

	for (i = 0; i < src->cui_nextents; i++)
		memcpy(&dst->cui_extents[i], &src->cui_extents[i],
				sizeof(struct xfs_phys_extent));
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

	xlog_recover_intent_item(log, &cuip->cui_item, lsn,
			&xfs_refcount_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};

#ifdef CONFIG_XFS_RT
STATIC int
xlog_recover_rtcui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

	xlog_recover_intent_item(log, &cuip->cui_item, lsn,
			&xfs_rtrefcount_update_defer_type);
	return 0;
}
#else
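/*
 * Recovering a realtime CUI requires CONFIG_XFS_RT, so treat any that we
 * find in the log as corruption.
 */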
STATIC int
xlog_recover_rtcui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
			item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
	return -EFSCORRUPTED;
}
#endif

const struct xlog_recover_item_ops xlog_rtcui_item_ops = {
	.item_type		= XFS_LI_CUI_RT,
	.commit_pass2		= xlog_recover_rtcui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};

#ifdef CONFIG_XFS_RT
STATIC int
xlog_recover_rtcud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI_RT,
			cud_formatp->cud_cui_id);
	return 0;
}
#else
# define xlog_recover_rtcud_commit_pass2	xlog_recover_rtcui_commit_pass2
#endif

const struct xlog_recover_item_ops xlog_rtcud_item_ops = {
	.item_type		= XFS_LI_CUD_RT,
	.commit_pass2		= xlog_recover_rtcud_commit_pass2,
};