// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_defer.h"
14 #include "xfs_trans.h"
15 #include "xfs_trans_priv.h"
16 #include "xfs_refcount_item.h"
17 #include "xfs_alloc.h"
18 #include "xfs_refcount.h"
21 * This routine is called to allocate a "refcount update done"
24 struct xfs_cud_log_item
*
27 struct xfs_cui_log_item
*cuip
)
29 struct xfs_cud_log_item
*cudp
;
31 cudp
= xfs_cud_init(tp
->t_mountp
, cuip
);
32 xfs_trans_add_item(tp
, &cudp
->cud_item
);
37 * Finish an refcount update and log it to the CUD. Note that the
38 * transaction is marked dirty regardless of whether the refcount
39 * update succeeds or fails to support the CUI/CUD lifecycle rules.
42 xfs_trans_log_finish_refcount_update(
44 struct xfs_cud_log_item
*cudp
,
45 struct xfs_defer_ops
*dop
,
46 enum xfs_refcount_intent_type type
,
47 xfs_fsblock_t startblock
,
48 xfs_extlen_t blockcount
,
49 xfs_fsblock_t
*new_fsb
,
50 xfs_extlen_t
*new_len
,
51 struct xfs_btree_cur
**pcur
)
55 error
= xfs_refcount_finish_one(tp
, dop
, type
, startblock
,
56 blockcount
, new_fsb
, new_len
, pcur
);
59 * Mark the transaction dirty, even on error. This ensures the
60 * transaction is aborted, which:
62 * 1.) releases the CUI and frees the CUD
63 * 2.) shuts down the filesystem
65 tp
->t_flags
|= XFS_TRANS_DIRTY
;
66 set_bit(XFS_LI_DIRTY
, &cudp
->cud_item
.li_flags
);
71 /* Sort refcount intents by AG. */
73 xfs_refcount_update_diff_items(
78 struct xfs_mount
*mp
= priv
;
79 struct xfs_refcount_intent
*ra
;
80 struct xfs_refcount_intent
*rb
;
82 ra
= container_of(a
, struct xfs_refcount_intent
, ri_list
);
83 rb
= container_of(b
, struct xfs_refcount_intent
, ri_list
);
84 return XFS_FSB_TO_AGNO(mp
, ra
->ri_startblock
) -
85 XFS_FSB_TO_AGNO(mp
, rb
->ri_startblock
);
90 xfs_refcount_update_create_intent(
94 struct xfs_cui_log_item
*cuip
;
99 cuip
= xfs_cui_init(tp
->t_mountp
, count
);
100 ASSERT(cuip
!= NULL
);
103 * Get a log_item_desc to point at the new item.
105 xfs_trans_add_item(tp
, &cuip
->cui_item
);
109 /* Set the phys extent flags for this reverse mapping. */
111 xfs_trans_set_refcount_flags(
112 struct xfs_phys_extent
*refc
,
113 enum xfs_refcount_intent_type type
)
117 case XFS_REFCOUNT_INCREASE
:
118 case XFS_REFCOUNT_DECREASE
:
119 case XFS_REFCOUNT_ALLOC_COW
:
120 case XFS_REFCOUNT_FREE_COW
:
121 refc
->pe_flags
|= type
;
128 /* Log refcount updates in the intent item. */
130 xfs_refcount_update_log_item(
131 struct xfs_trans
*tp
,
133 struct list_head
*item
)
135 struct xfs_cui_log_item
*cuip
= intent
;
136 struct xfs_refcount_intent
*refc
;
138 struct xfs_phys_extent
*ext
;
140 refc
= container_of(item
, struct xfs_refcount_intent
, ri_list
);
142 tp
->t_flags
|= XFS_TRANS_DIRTY
;
143 set_bit(XFS_LI_DIRTY
, &cuip
->cui_item
.li_flags
);
146 * atomic_inc_return gives us the value after the increment;
147 * we want to use it as an array index so we need to subtract 1 from
150 next_extent
= atomic_inc_return(&cuip
->cui_next_extent
) - 1;
151 ASSERT(next_extent
< cuip
->cui_format
.cui_nextents
);
152 ext
= &cuip
->cui_format
.cui_extents
[next_extent
];
153 ext
->pe_startblock
= refc
->ri_startblock
;
154 ext
->pe_len
= refc
->ri_blockcount
;
155 xfs_trans_set_refcount_flags(ext
, refc
->ri_type
);
158 /* Get an CUD so we can process all the deferred refcount updates. */
160 xfs_refcount_update_create_done(
161 struct xfs_trans
*tp
,
165 return xfs_trans_get_cud(tp
, intent
);
168 /* Process a deferred refcount update. */
170 xfs_refcount_update_finish_item(
171 struct xfs_trans
*tp
,
172 struct xfs_defer_ops
*dop
,
173 struct list_head
*item
,
177 struct xfs_refcount_intent
*refc
;
178 xfs_fsblock_t new_fsb
;
179 xfs_extlen_t new_aglen
;
182 refc
= container_of(item
, struct xfs_refcount_intent
, ri_list
);
183 error
= xfs_trans_log_finish_refcount_update(tp
, done_item
, dop
,
187 &new_fsb
, &new_aglen
,
188 (struct xfs_btree_cur
**)state
);
189 /* Did we run out of reservation? Requeue what we didn't finish. */
190 if (!error
&& new_aglen
> 0) {
191 ASSERT(refc
->ri_type
== XFS_REFCOUNT_INCREASE
||
192 refc
->ri_type
== XFS_REFCOUNT_DECREASE
);
193 refc
->ri_startblock
= new_fsb
;
194 refc
->ri_blockcount
= new_aglen
;
201 /* Clean up after processing deferred refcounts. */
203 xfs_refcount_update_finish_cleanup(
204 struct xfs_trans
*tp
,
208 struct xfs_btree_cur
*rcur
= state
;
210 xfs_refcount_finish_one_cleanup(tp
, rcur
, error
);
213 /* Abort all pending CUIs. */
215 xfs_refcount_update_abort_intent(
218 xfs_cui_release(intent
);
221 /* Cancel a deferred refcount update. */
223 xfs_refcount_update_cancel_item(
224 struct list_head
*item
)
226 struct xfs_refcount_intent
*refc
;
228 refc
= container_of(item
, struct xfs_refcount_intent
, ri_list
);
232 static const struct xfs_defer_op_type xfs_refcount_update_defer_type
= {
233 .type
= XFS_DEFER_OPS_TYPE_REFCOUNT
,
234 .max_items
= XFS_CUI_MAX_FAST_EXTENTS
,
235 .diff_items
= xfs_refcount_update_diff_items
,
236 .create_intent
= xfs_refcount_update_create_intent
,
237 .abort_intent
= xfs_refcount_update_abort_intent
,
238 .log_item
= xfs_refcount_update_log_item
,
239 .create_done
= xfs_refcount_update_create_done
,
240 .finish_item
= xfs_refcount_update_finish_item
,
241 .finish_cleanup
= xfs_refcount_update_finish_cleanup
,
242 .cancel_item
= xfs_refcount_update_cancel_item
,
245 /* Register the deferred op type. */
247 xfs_refcount_update_init_defer_op(void)
249 xfs_defer_init_op_type(&xfs_refcount_update_defer_type
);