// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_defer.h"
13 #include "xfs_btree.h"
14 #include "xfs_btree_staging.h"
16 #include "xfs_log_format.h"
17 #include "xfs_trans.h"
19 #include "xfs_inode.h"
20 #include "xfs_inode_fork.h"
21 #include "xfs_alloc.h"
22 #include "xfs_rtalloc.h"
24 #include "xfs_bmap_util.h"
25 #include "xfs_bmap_btree.h"
27 #include "xfs_rmap_btree.h"
28 #include "xfs_refcount.h"
29 #include "xfs_quota.h"
30 #include "xfs_ialloc.h"
32 #include "xfs_reflink.h"
33 #include "scrub/xfs_scrub.h"
34 #include "scrub/scrub.h"
35 #include "scrub/common.h"
36 #include "scrub/btree.h"
37 #include "scrub/trace.h"
38 #include "scrub/repair.h"
39 #include "scrub/bitmap.h"
40 #include "scrub/fsb_bitmap.h"
41 #include "scrub/xfile.h"
42 #include "scrub/xfarray.h"
43 #include "scrub/newbt.h"
44 #include "scrub/reap.h"
/*
 * Inode Fork Block Mapping (BMBT) Repair
 * ======================================
 *
 * Gather all the rmap records for the inode and fork we're fixing, reset the
 * incore fork, then recreate the btree.
 */
/*
 * State of the scan that decides whether the REFLINK iflag should be set on
 * the file once the fork has been rebuilt.
 */
enum reflink_scan_state {
	RLS_IRRELEVANT = -1,	/* not applicable to this file */
	RLS_UNKNOWN,		/* shared extent scans required */
	RLS_SET_IFLAG,		/* iflag must be set */
};
62 struct xfsb_bitmap old_bmbt_blocks
;
65 struct xrep_newbt new_bmapbt
;
67 /* List of new bmap records. */
68 struct xfarray
*bmap_records
;
72 /* How many blocks did we find allocated to this file? */
73 xfs_rfsblock_t nblocks
;
75 /* How many bmbt blocks did we find for this fork? */
76 xfs_rfsblock_t old_bmbt_block_count
;
78 /* get_records()'s position in the free space record array. */
79 xfarray_idx_t array_cur
;
81 /* How many real (non-hole, non-delalloc) mappings do we have? */
82 uint64_t real_mappings
;
84 /* Which fork are we fixing? */
87 /* What d the REFLINK flag be set when the repair is over? */
88 enum reflink_scan_state reflink_scan
;
90 /* Do we allow unwritten extents? */
94 /* Is this space extent shared? Flag the inode if it is. */
96 xrep_bmap_discover_shared(
98 xfs_fsblock_t startblock
,
99 xfs_filblks_t blockcount
)
101 struct xfs_scrub
*sc
= rb
->sc
;
107 agbno
= XFS_FSB_TO_AGBNO(sc
->mp
, startblock
);
108 error
= xfs_refcount_find_shared(sc
->sa
.refc_cur
, agbno
, blockcount
,
109 &fbno
, &flen
, false);
113 if (fbno
!= NULLAGBLOCK
)
114 rb
->reflink_scan
= RLS_SET_IFLAG
;
119 /* Remember this reverse-mapping as a series of bmap records. */
122 struct xrep_bmap
*rb
,
123 xfs_fileoff_t startoff
,
124 xfs_fsblock_t startblock
,
125 xfs_filblks_t blockcount
,
128 struct xfs_bmbt_irec irec
= {
129 .br_startoff
= startoff
,
130 .br_startblock
= startblock
,
131 .br_state
= unwritten
? XFS_EXT_UNWRITTEN
: XFS_EXT_NORM
,
133 struct xfs_bmbt_rec rbe
;
134 struct xfs_scrub
*sc
= rb
->sc
;
138 * If we're repairing the data fork of a non-reflinked regular file on
139 * a reflink filesystem, we need to figure out if this space extent is
142 if (rb
->reflink_scan
== RLS_UNKNOWN
&& !unwritten
) {
143 error
= xrep_bmap_discover_shared(rb
, startblock
, blockcount
);
151 irec
.br_blockcount
= min_t(xfs_filblks_t
, blockcount
,
152 XFS_MAX_BMBT_EXTLEN
);
154 fa
= xfs_bmap_validate_extent(sc
->ip
, rb
->whichfork
, &irec
);
156 return -EFSCORRUPTED
;
158 xfs_bmbt_disk_set_all(&rbe
, &irec
);
160 trace_xrep_bmap_found(sc
->ip
, rb
->whichfork
, &irec
);
162 if (xchk_should_terminate(sc
, &error
))
165 error
= xfarray_append(rb
->bmap_records
, &rbe
);
171 irec
.br_startblock
+= irec
.br_blockcount
;
172 irec
.br_startoff
+= irec
.br_blockcount
;
173 blockcount
-= irec
.br_blockcount
;
174 } while (blockcount
> 0);
179 /* Check for any obvious errors or conflicts in the file mapping. */
181 xrep_bmap_check_fork_rmap(
182 struct xrep_bmap
*rb
,
183 struct xfs_btree_cur
*cur
,
184 const struct xfs_rmap_irec
*rec
)
186 struct xfs_scrub
*sc
= rb
->sc
;
187 enum xbtree_recpacking outcome
;
191 * Data extents for rt files are never stored on the data device, but
192 * everything else (xattrs, bmbt blocks) can be.
194 if (XFS_IS_REALTIME_INODE(sc
->ip
) &&
195 !(rec
->rm_flags
& (XFS_RMAP_ATTR_FORK
| XFS_RMAP_BMBT_BLOCK
)))
196 return -EFSCORRUPTED
;
198 /* Check that this is within the AG. */
199 if (!xfs_verify_agbext(to_perag(cur
->bc_group
), rec
->rm_startblock
,
201 return -EFSCORRUPTED
;
203 /* Check the file offset range. */
204 if (!(rec
->rm_flags
& XFS_RMAP_BMBT_BLOCK
) &&
205 !xfs_verify_fileext(sc
->mp
, rec
->rm_offset
, rec
->rm_blockcount
))
206 return -EFSCORRUPTED
;
208 /* No contradictory flags. */
209 if ((rec
->rm_flags
& (XFS_RMAP_ATTR_FORK
| XFS_RMAP_BMBT_BLOCK
)) &&
210 (rec
->rm_flags
& XFS_RMAP_UNWRITTEN
))
211 return -EFSCORRUPTED
;
213 /* Make sure this isn't free space. */
214 error
= xfs_alloc_has_records(sc
->sa
.bno_cur
, rec
->rm_startblock
,
215 rec
->rm_blockcount
, &outcome
);
218 if (outcome
!= XBTREE_RECPACKING_EMPTY
)
219 return -EFSCORRUPTED
;
221 /* Must not be an inode chunk. */
222 error
= xfs_ialloc_has_inodes_at_extent(sc
->sa
.ino_cur
,
223 rec
->rm_startblock
, rec
->rm_blockcount
, &outcome
);
226 if (outcome
!= XBTREE_RECPACKING_EMPTY
)
227 return -EFSCORRUPTED
;
232 /* Record extents that belong to this inode's fork. */
235 struct xfs_btree_cur
*cur
,
236 const struct xfs_rmap_irec
*rec
,
239 struct xrep_bmap
*rb
= priv
;
243 if (xchk_should_terminate(rb
->sc
, &error
))
246 if (rec
->rm_owner
!= rb
->sc
->ip
->i_ino
)
249 error
= xrep_bmap_check_fork_rmap(rb
, cur
, rec
);
254 * Record all blocks allocated to this file even if the extent isn't
255 * for the fork we're rebuilding so that we can reset di_nblocks later.
257 rb
->nblocks
+= rec
->rm_blockcount
;
259 /* If this rmap isn't for the fork we want, we're done. */
260 if (rb
->whichfork
== XFS_DATA_FORK
&&
261 (rec
->rm_flags
& XFS_RMAP_ATTR_FORK
))
263 if (rb
->whichfork
== XFS_ATTR_FORK
&&
264 !(rec
->rm_flags
& XFS_RMAP_ATTR_FORK
))
267 /* Reject unwritten extents if we don't allow those. */
268 if ((rec
->rm_flags
& XFS_RMAP_UNWRITTEN
) && !rb
->allow_unwritten
)
269 return -EFSCORRUPTED
;
271 fsbno
= xfs_agbno_to_fsb(to_perag(cur
->bc_group
), rec
->rm_startblock
);
273 if (rec
->rm_flags
& XFS_RMAP_BMBT_BLOCK
) {
274 rb
->old_bmbt_block_count
+= rec
->rm_blockcount
;
275 return xfsb_bitmap_set(&rb
->old_bmbt_blocks
, fsbno
,
279 return xrep_bmap_from_rmap(rb
, rec
->rm_offset
, fsbno
,
281 rec
->rm_flags
& XFS_RMAP_UNWRITTEN
);
285 * Compare two block mapping records. We want to sort in order of increasing
289 xrep_bmap_extent_cmp(
293 const struct xfs_bmbt_rec
*ba
= a
;
294 const struct xfs_bmbt_rec
*bb
= b
;
295 xfs_fileoff_t ao
= xfs_bmbt_disk_get_startoff(ba
);
296 xfs_fileoff_t bo
= xfs_bmbt_disk_get_startoff(bb
);
306 * Sort the bmap extents by fork offset or else the records will be in the
307 * wrong order. Ensure there are no overlaps in the file offset ranges.
310 xrep_bmap_sort_records(
311 struct xrep_bmap
*rb
)
313 struct xfs_bmbt_irec irec
;
314 xfs_fileoff_t next_off
= 0;
315 xfarray_idx_t array_cur
;
318 error
= xfarray_sort(rb
->bmap_records
, xrep_bmap_extent_cmp
,
319 XFARRAY_SORT_KILLABLE
);
323 foreach_xfarray_idx(rb
->bmap_records
, array_cur
) {
324 struct xfs_bmbt_rec rec
;
326 if (xchk_should_terminate(rb
->sc
, &error
))
329 error
= xfarray_load(rb
->bmap_records
, array_cur
, &rec
);
333 xfs_bmbt_disk_get_all(&rec
, &irec
);
335 if (irec
.br_startoff
< next_off
)
336 return -EFSCORRUPTED
;
338 next_off
= irec
.br_startoff
+ irec
.br_blockcount
;
344 /* Scan one AG for reverse mappings that we can turn into extent maps. */
347 struct xrep_bmap
*rb
,
348 struct xfs_perag
*pag
)
350 struct xfs_scrub
*sc
= rb
->sc
;
353 error
= xrep_ag_init(sc
, pag
, &sc
->sa
);
357 error
= xfs_rmap_query_all(sc
->sa
.rmap_cur
, xrep_bmap_walk_rmap
, rb
);
358 xchk_ag_free(sc
, &sc
->sa
);
362 /* Find the delalloc extents from the old incore extent tree. */
364 xrep_bmap_find_delalloc(
365 struct xrep_bmap
*rb
)
367 struct xfs_bmbt_irec irec
;
368 struct xfs_iext_cursor icur
;
369 struct xfs_bmbt_rec rbe
;
370 struct xfs_inode
*ip
= rb
->sc
->ip
;
371 struct xfs_ifork
*ifp
= xfs_ifork_ptr(ip
, rb
->whichfork
);
375 * Skip this scan if we don't expect to find delayed allocation
376 * reservations in this fork.
378 if (rb
->whichfork
== XFS_ATTR_FORK
|| ip
->i_delayed_blks
== 0)
381 for_each_xfs_iext(ifp
, &icur
, &irec
) {
382 if (!isnullstartblock(irec
.br_startblock
))
385 xfs_bmbt_disk_set_all(&rbe
, &irec
);
387 trace_xrep_bmap_found(ip
, rb
->whichfork
, &irec
);
389 if (xchk_should_terminate(rb
->sc
, &error
))
392 error
= xfarray_append(rb
->bmap_records
, &rbe
);
401 * Collect block mappings for this fork of this inode and decide if we have
402 * enough space to rebuild. Caller is responsible for cleaning up the list if
403 * anything goes wrong.
406 xrep_bmap_find_mappings(
407 struct xrep_bmap
*rb
)
409 struct xfs_scrub
*sc
= rb
->sc
;
410 struct xfs_perag
*pag
= NULL
;
413 /* Iterate the rmaps for extents. */
414 while ((pag
= xfs_perag_next(sc
->mp
, pag
))) {
415 error
= xrep_bmap_scan_ag(rb
, pag
);
422 return xrep_bmap_find_delalloc(rb
);
425 /* Retrieve real extent mappings for bulk loading the bmap btree. */
427 xrep_bmap_get_records(
428 struct xfs_btree_cur
*cur
,
430 struct xfs_btree_block
*block
,
431 unsigned int nr_wanted
,
434 struct xfs_bmbt_rec rec
;
435 struct xfs_bmbt_irec
*irec
= &cur
->bc_rec
.b
;
436 struct xrep_bmap
*rb
= priv
;
437 union xfs_btree_rec
*block_rec
;
441 for (loaded
= 0; loaded
< nr_wanted
; loaded
++, idx
++) {
443 error
= xfarray_load(rb
->bmap_records
, rb
->array_cur
++,
448 xfs_bmbt_disk_get_all(&rec
, irec
);
449 } while (isnullstartblock(irec
->br_startblock
));
451 block_rec
= xfs_btree_rec_addr(cur
, idx
, block
);
452 cur
->bc_ops
->init_rec_from_cur(cur
, block_rec
);
458 /* Feed one of the new btree blocks to the bulk loader. */
460 xrep_bmap_claim_block(
461 struct xfs_btree_cur
*cur
,
462 union xfs_btree_ptr
*ptr
,
465 struct xrep_bmap
*rb
= priv
;
467 return xrep_newbt_claim_block(cur
, &rb
->new_bmapbt
, ptr
);
470 /* Figure out how much space we need to create the incore btree root block. */
472 xrep_bmap_iroot_size(
473 struct xfs_btree_cur
*cur
,
475 unsigned int nr_this_level
,
480 return xfs_bmap_broot_space_calc(cur
->bc_mp
, nr_this_level
);
483 /* Update the inode counters. */
485 xrep_bmap_reset_counters(
486 struct xrep_bmap
*rb
)
488 struct xfs_scrub
*sc
= rb
->sc
;
489 struct xbtree_ifakeroot
*ifake
= &rb
->new_bmapbt
.ifake
;
492 if (rb
->reflink_scan
== RLS_SET_IFLAG
)
493 sc
->ip
->i_diflags2
|= XFS_DIFLAG2_REFLINK
;
496 * Update the inode block counts to reflect the extents we found in the
499 delta
= ifake
->if_blocks
- rb
->old_bmbt_block_count
;
500 sc
->ip
->i_nblocks
= rb
->nblocks
+ delta
;
501 xfs_trans_log_inode(sc
->tp
, sc
->ip
, XFS_ILOG_CORE
);
504 * Adjust the quota counts by the difference in size between the old
507 xfs_trans_mod_dquot_byino(sc
->tp
, sc
->ip
, XFS_TRANS_DQ_BCOUNT
, delta
);
512 * Create a new iext tree and load it with block mappings. If the inode is
513 * in extents format, that's all we need to do to commit the new mappings.
514 * If it is in btree format, this takes care of preloading the incore tree.
517 xrep_bmap_extents_load(
518 struct xrep_bmap
*rb
)
520 struct xfs_iext_cursor icur
;
521 struct xfs_bmbt_irec irec
;
522 struct xfs_ifork
*ifp
= rb
->new_bmapbt
.ifake
.if_fork
;
523 xfarray_idx_t array_cur
;
526 ASSERT(ifp
->if_bytes
== 0);
528 /* Add all the mappings (incl. delalloc) to the incore extent tree. */
529 xfs_iext_first(ifp
, &icur
);
530 foreach_xfarray_idx(rb
->bmap_records
, array_cur
) {
531 struct xfs_bmbt_rec rec
;
533 error
= xfarray_load(rb
->bmap_records
, array_cur
, &rec
);
537 xfs_bmbt_disk_get_all(&rec
, &irec
);
539 xfs_iext_insert_raw(ifp
, &icur
, &irec
);
540 if (!isnullstartblock(irec
.br_startblock
))
543 xfs_iext_next(ifp
, &icur
);
546 return xrep_ino_ensure_extent_count(rb
->sc
, rb
->whichfork
,
551 * Reserve new btree blocks, bulk load the bmap records into the ondisk btree,
552 * and load the incore extent tree.
555 xrep_bmap_btree_load(
556 struct xrep_bmap
*rb
,
557 struct xfs_btree_cur
*bmap_cur
)
559 struct xfs_scrub
*sc
= rb
->sc
;
562 /* Compute how many blocks we'll need. */
563 error
= xfs_btree_bload_compute_geometry(bmap_cur
,
564 &rb
->new_bmapbt
.bload
, rb
->real_mappings
);
568 /* Last chance to abort before we start committing fixes. */
569 if (xchk_should_terminate(sc
, &error
))
573 * Guess how many blocks we're going to need to rebuild an entire bmap
574 * from the number of extents we found, and pump up our transaction to
575 * have sufficient block reservation. We're allowed to exceed file
576 * quota to repair inconsistent metadata.
578 error
= xfs_trans_reserve_more_inode(sc
->tp
, sc
->ip
,
579 rb
->new_bmapbt
.bload
.nr_blocks
, 0, true);
583 /* Reserve the space we'll need for the new btree. */
584 error
= xrep_newbt_alloc_blocks(&rb
->new_bmapbt
,
585 rb
->new_bmapbt
.bload
.nr_blocks
);
589 /* Add all observed bmap records. */
590 rb
->array_cur
= XFARRAY_CURSOR_INIT
;
591 error
= xfs_btree_bload(bmap_cur
, &rb
->new_bmapbt
.bload
, rb
);
596 * Load the new bmap records into the new incore extent tree to
597 * preserve delalloc reservations for regular files. The directory
598 * code loads the extent tree during xfs_dir_open and assumes
599 * thereafter that it remains loaded, so we must not violate that
602 return xrep_bmap_extents_load(rb
);
606 * Use the collected bmap information to stage a new bmap fork. If this is
607 * successful we'll return with the new fork information logged to the repair
608 * transaction but not yet committed. The caller must ensure that the inode
609 * is joined to the transaction; the inode will be joined to a clean
610 * transaction when the function returns.
613 xrep_bmap_build_new_fork(
614 struct xrep_bmap
*rb
)
616 struct xfs_owner_info oinfo
;
617 struct xfs_scrub
*sc
= rb
->sc
;
618 struct xfs_btree_cur
*bmap_cur
;
619 struct xbtree_ifakeroot
*ifake
= &rb
->new_bmapbt
.ifake
;
622 error
= xrep_bmap_sort_records(rb
);
627 * Prepare to construct the new fork by initializing the new btree
628 * structure and creating a fake ifork in the ifakeroot structure.
630 xfs_rmap_ino_bmbt_owner(&oinfo
, sc
->ip
->i_ino
, rb
->whichfork
);
631 error
= xrep_newbt_init_inode(&rb
->new_bmapbt
, sc
, rb
->whichfork
,
636 rb
->new_bmapbt
.bload
.get_records
= xrep_bmap_get_records
;
637 rb
->new_bmapbt
.bload
.claim_block
= xrep_bmap_claim_block
;
638 rb
->new_bmapbt
.bload
.iroot_size
= xrep_bmap_iroot_size
;
641 * Allocate a new bmap btree cursor for reloading an inode block mapping
644 bmap_cur
= xfs_bmbt_init_cursor(sc
->mp
, NULL
, sc
->ip
, XFS_STAGING_FORK
);
645 xfs_btree_stage_ifakeroot(bmap_cur
, ifake
);
648 * Figure out the size and format of the new fork, then fill it with
649 * all the bmap records we've found. Join the inode to the transaction
650 * so that we can roll the transaction while holding the inode locked.
652 if (rb
->real_mappings
<= XFS_IFORK_MAXEXT(sc
->ip
, rb
->whichfork
)) {
653 ifake
->if_fork
->if_format
= XFS_DINODE_FMT_EXTENTS
;
654 error
= xrep_bmap_extents_load(rb
);
656 ifake
->if_fork
->if_format
= XFS_DINODE_FMT_BTREE
;
657 error
= xrep_bmap_btree_load(rb
, bmap_cur
);
663 * Install the new fork in the inode. After this point the old mapping
664 * data are no longer accessible and the new tree is live. We delete
665 * the cursor immediately after committing the staged root because the
666 * staged fork might be in extents format.
668 xfs_bmbt_commit_staged_btree(bmap_cur
, sc
->tp
, rb
->whichfork
);
669 xfs_btree_del_cursor(bmap_cur
, 0);
671 /* Reset the inode counters now that we've changed the fork. */
672 error
= xrep_bmap_reset_counters(rb
);
676 /* Dispose of any unused blocks and the accounting information. */
677 error
= xrep_newbt_commit(&rb
->new_bmapbt
);
681 return xrep_roll_trans(sc
);
685 xfs_btree_del_cursor(bmap_cur
, error
);
687 xrep_newbt_cancel(&rb
->new_bmapbt
);
692 * Now that we've logged the new inode btree, invalidate all of the old blocks
693 * and free them, if there were any.
696 xrep_bmap_remove_old_tree(
697 struct xrep_bmap
*rb
)
699 struct xfs_scrub
*sc
= rb
->sc
;
700 struct xfs_owner_info oinfo
;
702 /* Free the old bmbt blocks if they're not in use. */
703 xfs_rmap_ino_bmbt_owner(&oinfo
, sc
->ip
->i_ino
, rb
->whichfork
);
704 return xrep_reap_fsblocks(sc
, &rb
->old_bmbt_blocks
, &oinfo
);
707 /* Check for garbage inputs. Returns -ECANCELED if there's nothing to do. */
709 xrep_bmap_check_inputs(
710 struct xfs_scrub
*sc
,
713 struct xfs_ifork
*ifp
= xfs_ifork_ptr(sc
->ip
, whichfork
);
715 ASSERT(whichfork
== XFS_DATA_FORK
|| whichfork
== XFS_ATTR_FORK
);
717 if (!xfs_has_rmapbt(sc
->mp
))
720 /* No fork means nothing to rebuild. */
725 * We only know how to repair extent mappings, which is to say that we
726 * only support extents and btree fork format. Repairs to a local
727 * format fork require a higher level repair function, so we do not
728 * have any work to do here.
730 switch (ifp
->if_format
) {
731 case XFS_DINODE_FMT_DEV
:
732 case XFS_DINODE_FMT_LOCAL
:
733 case XFS_DINODE_FMT_UUID
:
735 case XFS_DINODE_FMT_EXTENTS
:
736 case XFS_DINODE_FMT_BTREE
:
739 return -EFSCORRUPTED
;
742 if (whichfork
== XFS_ATTR_FORK
)
745 /* Only files, symlinks, and directories get to have data forks. */
746 switch (VFS_I(sc
->ip
)->i_mode
& S_IFMT
) {
756 /* Don't know how to rebuild realtime data forks. */
757 if (XFS_IS_REALTIME_INODE(sc
->ip
))
763 /* Set up the initial state of the reflink scan. */
764 static inline enum reflink_scan_state
765 xrep_bmap_init_reflink_scan(
766 struct xfs_scrub
*sc
,
769 /* cannot share on non-reflink filesystem */
770 if (!xfs_has_reflink(sc
->mp
))
771 return RLS_IRRELEVANT
;
773 /* preserve flag if it's already set */
774 if (xfs_is_reflink_inode(sc
->ip
))
775 return RLS_SET_IFLAG
;
777 /* can only share regular files */
778 if (!S_ISREG(VFS_I(sc
->ip
)->i_mode
))
779 return RLS_IRRELEVANT
;
781 /* cannot share attr fork extents */
782 if (whichfork
!= XFS_DATA_FORK
)
783 return RLS_IRRELEVANT
;
785 /* cannot share realtime extents */
786 if (XFS_IS_REALTIME_INODE(sc
->ip
))
787 return RLS_IRRELEVANT
;
792 /* Repair an inode fork. */
795 struct xfs_scrub
*sc
,
797 bool allow_unwritten
)
799 struct xrep_bmap
*rb
;
801 xfs_extnum_t max_bmbt_recs
;
805 error
= xrep_bmap_check_inputs(sc
, whichfork
);
806 if (error
== -ECANCELED
)
811 rb
= kzalloc(sizeof(struct xrep_bmap
), XCHK_GFP_FLAGS
);
815 rb
->whichfork
= whichfork
;
816 rb
->reflink_scan
= xrep_bmap_init_reflink_scan(sc
, whichfork
);
817 rb
->allow_unwritten
= allow_unwritten
;
819 /* Set up enough storage to handle the max records for this fork. */
820 large_extcount
= xfs_has_large_extent_counts(sc
->mp
);
821 max_bmbt_recs
= xfs_iext_max_nextents(large_extcount
, whichfork
);
822 descr
= xchk_xfile_ino_descr(sc
, "%s fork mapping records",
823 whichfork
== XFS_DATA_FORK
? "data" : "attr");
824 error
= xfarray_create(descr
, max_bmbt_recs
,
825 sizeof(struct xfs_bmbt_rec
), &rb
->bmap_records
);
830 /* Collect all reverse mappings for this fork's extents. */
831 xfsb_bitmap_init(&rb
->old_bmbt_blocks
);
832 error
= xrep_bmap_find_mappings(rb
);
836 xfs_trans_ijoin(sc
->tp
, sc
->ip
, 0);
838 /* Rebuild the bmap information. */
839 error
= xrep_bmap_build_new_fork(rb
);
843 /* Kill the old tree. */
844 error
= xrep_bmap_remove_old_tree(rb
);
849 xfsb_bitmap_destroy(&rb
->old_bmbt_blocks
);
850 xfarray_destroy(rb
->bmap_records
);
856 /* Repair an inode's data fork. */
859 struct xfs_scrub
*sc
)
861 return xrep_bmap(sc
, XFS_DATA_FORK
, true);
864 /* Repair an inode's attr fork. */
867 struct xfs_scrub
*sc
)
869 return xrep_bmap(sc
, XFS_ATTR_FORK
, false);