// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}
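/*
 * Worked example: an XFS_RMAP_MAP update of an unwritten data-fork
 * extent is encoded above as XFS_RMAP_EXTENT_MAP |
 * XFS_RMAP_EXTENT_UNWRITTEN; log recovery decodes these bits back into
 * an intent type, fork, and extent state before replaying the update.
 */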
/*
 * Get an RUD so we can process all the deferred rmap updates.
 */
struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = xfs_rud_init(tp->t_mountp, ruip);
	xfs_trans_add_item(tp, &rudp->rud_item);

	return rudp;
}
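/*
 * The RUD holds a pointer to the RUI it will complete; once the
 * transaction carrying the RUD commits, the RUI can be released and
 * retired from the log.
 */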
/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}
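/*
 * The pcur argument lets xfs_rmap_finish_one() cache an rmap btree
 * cursor across consecutive updates; the opaque defer-ops state pointer
 * is threaded through here as that cursor and torn down in
 * xfs_rmap_update_finish_cleanup() below.
 */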
/* Sort rmap intents by AG. */
STATIC int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}
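/*
 * Processing the intents in ascending AG order means the AG headers are
 * always locked in the same order, which avoids deadlocks when one
 * transaction defers rmap updates spanning several AGs.
 */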
/* Get an RUI. */
STATIC void *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_rui_log_item		*ruip;

	ruip = xfs_rui_init(tp->t_mountp, count);
	ASSERT(ruip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &ruip->rui_item);
	return ruip;
}
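/*
 * 'count' sizes the RUI for the whole batch of deferred updates;
 * xfs_rmap_update_log_item() below claims one rui_extents[] slot per
 * update via the rui_next_extent counter.
 */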
/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
	struct xfs_rui_log_item		*ruip = intent;
	struct xfs_rmap_intent		*rmap;
	uint				next_extent;
	struct xfs_map_extent		*map;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}
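/*
 * This is the incore-to-ondisk translation step: each list-form
 * xfs_rmap_intent becomes an xfs_map_extent record inside the RUI's
 * log format buffer.
 */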
/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return xfs_trans_get_rud(tp, intent);
}
/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, done_item,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}
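/*
 * 'state' persists for the whole defer-ops run, so the rmapbt cursor
 * created for the first update can be reused by later updates against
 * the same AG instead of being rebuilt each time.
 */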
/* Clean up after processing deferred rmaps. */
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}
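/*
 * rcur may be NULL here if no update ever created a cursor;
 * xfs_rmap_finish_one_cleanup() is expected to tolerate that.
 */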
/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	void				*intent)
{
	xfs_rui_release(intent);
}
/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}
static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_RMAP,
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_rmap_update_diff_items,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.log_item	= xfs_rmap_update_log_item,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup	= xfs_rmap_update_finish_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};
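/*
 * A minimal sketch of how an update reaches this machinery (roughly
 * what the xfs_rmap_*_extent() helpers in libxfs/xfs_rmap.c do; the
 * variable names are illustrative and the exact xfs_defer_add()
 * signature varies across kernel versions):
 *
 *	struct xfs_rmap_intent	*ri;
 *
 *	ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS);
 *	ri->ri_type = XFS_RMAP_MAP;
 *	ri->ri_owner = owner;
 *	ri->ri_whichfork = whichfork;
 *	ri->ri_bmap = *bmap;
 *	xfs_defer_add(tp->t_dfops, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
 *
 * Deferred-op processing then sorts the queued intents, logs an RUI,
 * and calls finish_item for each intent in a follow-up transaction.
 */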
/* Register the deferred op type. */
void
xfs_rmap_update_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_rmap_update_defer_type);
}