// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_rtgroup.h"

struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kvfree(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kfree(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (!atomic_dec_and_test(&ruip->rui_refcount))
		return;

	xfs_trans_ail_delete(&ruip->rui_item, 0);
	xfs_rui_item_free(ruip);
}

STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);
	ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);

	ruip->rui_format.rui_type = lip->li_type;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the RUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the RUI to either
 * construct and commit the RUD or drop the RUD's reference in the event of
 * error. Simply drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	unsigned short			item_type,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	ASSERT(item_type == XFS_LI_RUI || item_type == XFS_LI_RUI_RT);

	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
				GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, item_type, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * It is at this point that we assert that all of the extent
 * slots in the rud item have been filled.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(lip->li_type == XFS_LI_RUD || lip->li_type == XFS_LI_RUD_RT);

	rudp->rud_format.rud_type = lip->li_type;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kvfree(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}

static struct xfs_log_item *
xfs_rud_item_intent(
	struct xfs_log_item	*lip)
{
	return &RUD_ITEM(lip)->rud_ruip->rui_item;
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
	.iop_intent	= xfs_rud_item_intent,
};

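/*
 * A rough reading of the flags above: XFS_ITEM_RELEASE_WHEN_COMMITTED lets
 * the RUD be released as soon as its transaction commits, and
 * XFS_ITEM_INTENT_DONE marks it as the "done" half of an intent/intent-done
 * pair, with ->iop_intent pointing back at the RUI it completes.
 */
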
static inline struct xfs_rmap_intent *ri_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_rmap_intent, ri_list);
}

static inline bool
xfs_rui_item_isrt(const struct xfs_log_item *lip)
{
	ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);

	return lip->li_type == XFS_LI_RUI_RT;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_rmap_intent	*ra = ri_entry(a);
	struct xfs_rmap_intent	*rb = ri_entry(b);

	return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}

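/*
 * Sorting by group number presumably keeps per-group metadata (e.g. the AGF)
 * locked in ascending order while the intents are processed, the usual XFS
 * convention for avoiding ABBA deadlocks between transactions.
 */
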
/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans	*tp,
	struct xfs_rui_log_item	*ruip,
	struct xfs_rmap_intent	*ri)
{
	uint			next_extent;
	struct xfs_map_extent	*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = ri->ri_owner;
	map->me_startblock = ri->ri_bmap.br_startblock;
	map->me_startoff = ri->ri_bmap.br_startoff;
	map->me_len = ri->ri_bmap.br_blockcount;

	map->me_flags = 0;
	if (ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (ri->ri_whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (ri->ri_type) {
	case XFS_RMAP_MAP:
		map->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		map->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

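/*
 * The me_flags encoding built above is the on-disk representation of the
 * intent; xfs_rui_recover_work() below performs the inverse translation
 * when an RUI is replayed from the log.
 */
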
static struct xfs_log_item *
__xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort,
	unsigned short			item_type)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rmap_intent		*ri;

	ASSERT(count > 0);

	ruip = xfs_rui_init(mp, item_type, count);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, ri);
	return &ruip->rui_item;
}

static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return __xfs_rmap_update_create_intent(tp, items, count, sort,
			XFS_LI_RUI);
}

static inline unsigned short
xfs_rud_type_from_rui(const struct xfs_rui_log_item *ruip)
{
	return xfs_rui_item_isrt(&ruip->rui_item) ? XFS_LI_RUD_RT : XFS_LI_RUD;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(intent);
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item,
			xfs_rud_type_from_rui(ruip), &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	return &rudp->rud_item;
}

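/*
 * Note that rud_rui_id records the RUI's rui_id (the in-core RUI's address,
 * as set in xfs_rui_init). Log recovery matches an RUD to its RUI by this
 * id via ->iop_match; see xlog_recover_rud_commit_pass2() below.
 */
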
/* Add this deferred RUI to the transaction. */
void
xfs_rmap_defer_add(
	struct xfs_trans	*tp,
	struct xfs_rmap_intent	*ri)
{
	struct xfs_mount	*mp = tp->t_mountp;

	/*
	 * Deferred rmap updates for the realtime and data sections must use
	 * separate transactions to finish deferred work because updates to
	 * realtime metadata files can lock AGFs to allocate btree blocks and
	 * we don't want that mixing with the AGF locks taken to finish data
	 * section updates.
	 */
	ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock,
			ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);

	trace_xfs_rmap_defer(mp, ri);
	xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
			&xfs_rtrmap_update_defer_type :
			&xfs_rmap_update_defer_type);
}

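/*
 * Minimal usage sketch for the function above (hypothetical caller; the
 * field assignments mirror those made by xfs_rui_recover_work() below):
 *
 *	struct xfs_rmap_intent	*ri;
 *
 *	ri = kmem_cache_alloc(xfs_rmap_intent_cache,
 *			GFP_KERNEL | __GFP_NOFAIL);
 *	ri->ri_type = XFS_RMAP_MAP;
 *	ri->ri_owner = ino;
 *	ri->ri_whichfork = XFS_DATA_FORK;
 *	ri->ri_bmap = *irec;	(a struct xfs_bmbt_irec)
 *	ri->ri_realtime = false;
 *	xfs_rmap_defer_add(tp, ri);
 */
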
/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*ri = ri_entry(item);

	xfs_group_intent_put(ri->ri_group);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*ri = ri_entry(item);
	int				error;

	error = xfs_rmap_finish_one(tp, ri, state);

	xfs_rmap_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
xfs_rmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp = NULL;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error && agbp)
		xfs_trans_brelse(tp, agbp);
}

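/*
 * Presumably the AGF buffer only needs the explicit release on error: on
 * success it stays attached to the transaction and is unlocked at commit
 * as usual.
 */
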
/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount		*mp,
	bool				isrt,
	struct xfs_map_extent		*map)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
	    !xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	if (isrt)
		return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}

static inline void
xfs_rui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	bool				isrt,
	const struct xfs_map_extent	*map)
{
	struct xfs_rmap_intent		*ri;

	ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
		ri->ri_type = XFS_RMAP_MAP;
		break;
	case XFS_RMAP_EXTENT_MAP_SHARED:
		ri->ri_type = XFS_RMAP_MAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_UNMAP:
		ri->ri_type = XFS_RMAP_UNMAP;
		break;
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
		ri->ri_type = XFS_RMAP_UNMAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_CONVERT:
		ri->ri_type = XFS_RMAP_CONVERT;
		break;
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
		ri->ri_type = XFS_RMAP_CONVERT_SHARED;
		break;
	case XFS_RMAP_EXTENT_ALLOC:
		ri->ri_type = XFS_RMAP_ALLOC;
		break;
	case XFS_RMAP_EXTENT_FREE:
		ri->ri_type = XFS_RMAP_FREE;
		break;
	default:
		ASSERT(0);
		return;
	}

	ri->ri_owner = map->me_owner;
	ri->ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	ri->ri_bmap.br_startblock = map->me_startblock;
	ri->ri_bmap.br_startoff = map->me_startoff;
	ri->ri_bmap.br_blockcount = map->me_len;
	ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	ri->ri_group = xfs_group_intent_get(mp, map->me_startblock,
			isrt ? XG_TYPE_RTG : XG_TYPE_AG);
	ri->ri_realtime = isrt;

	xfs_defer_add_item(dfp, &ri->ri_list);
}

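/*
 * The helper above is the inverse of xfs_rmap_update_log_item(): it turns
 * an on-disk xfs_map_extent back into an in-core xfs_rmap_intent and queues
 * it on the pending-work list so that replay finishes the update through
 * the normal defer machinery.
 */
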
/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	bool				isrt = xfs_rui_item_isrt(lip);
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI. If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp, isrt,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}

		xfs_rui_recover_work(mp, dfp, isrt,
				&ruip->rui_format.rui_extents[i]);
	}

	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&ruip->rui_format,
				sizeof(ruip->rui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rmap_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*map;
	unsigned int			count;

	ASSERT(intent->li_type == XFS_LI_RUI ||
	       intent->li_type == XFS_LI_RUI_RT);

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	map = RUI_ITEM(intent)->rui_format.rui_extents;

	ruip = xfs_rui_init(tp->t_mountp, intent->li_type, count);
	memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
	atomic_set(&ruip->rui_next_extent, count);

	return &ruip->rui_item;
}

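/*
 * Relogging copies the pending extents into a fresh RUI so the intent is
 * rewritten at the head of the log, letting the tail move past the old
 * copy. rui_next_extent is set to count because the new item is created
 * fully populated.
 */
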
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.name		= "rmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};

#ifdef CONFIG_XFS_RT
static struct xfs_log_item *
xfs_rtrmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return __xfs_rmap_update_create_intent(tp, items, count, sort,
			XFS_LI_RUI_RT);
}

/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
xfs_rtrmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	if (rcur)
		xfs_btree_del_cursor(rcur, error);
}

const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
	.name		= "rtrmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rtrmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rtrmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};
#else
const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
	.name		= "rtrmap",
};
#endif /* CONFIG_XFS_RT */

STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_match	= xfs_rui_item_match,
};

static inline void
xfs_rui_copy_format(
	struct xfs_rui_log_format	*dst,
	const struct xfs_rui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents));

	for (i = 0; i < src->rui_nextents; i++)
		memcpy(&dst->rui_extents[i], &src->rui_extents[i],
				sizeof(struct xfs_map_extent));
}

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rmap_update_defer_type);
	return 0;
}

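/*
 * rui_next_extent is set to rui_nextents above because a recovered RUI is
 * fully populated by construction; the assertion in xfs_rui_item_format()
 * depends on that if the item gets written to the log again.
 */
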
const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

#ifdef CONFIG_XFS_RT
STATIC int
xlog_recover_rtrui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rtrmap_update_defer_type);
	return 0;
}
#else
STATIC int
xlog_recover_rtrui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
			item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
	return -EFSCORRUPTED;
}
#endif /* CONFIG_XFS_RT */

const struct xlog_recover_item_ops xlog_rtrui_item_ops = {
	.item_type		= XFS_LI_RUI_RT,
	.commit_pass2		= xlog_recover_rtrui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};

#ifdef CONFIG_XFS_RT
STATIC int
xlog_recover_rtrud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI_RT,
			rud_formatp->rud_rui_id);
	return 0;
}
#else
# define xlog_recover_rtrud_commit_pass2	xlog_recover_rtrui_commit_pass2
#endif /* CONFIG_XFS_RT */

const struct xlog_recover_item_ops xlog_rtrud_item_ops = {
	.item_type		= XFS_LI_RUD_RT,
	.commit_pass2		= xlog_recover_rtrud_commit_pass2,
};