// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
#include "xfs_ag_resv.h"
#include "xfs_trans_space.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
/*
 * Attempt to repair some metadata, if the metadata is corrupt and userspace
 * told us to fix it.  This function returns -EAGAIN to mean "re-run scrub",
 * and sets XREP_ALREADY_FIXED in sc->flags if it thinks it repaired anything.
 */
int
xrep_attempt(
	struct xfs_inode	*ip,
	struct xfs_scrub	*sc)
{
	int			error = 0;

	trace_xrep_attempt(ip, sc->sm, error);

	xchk_ag_btcur_free(&sc->sa);

	/* Repair whatever's broken. */
	ASSERT(sc->ops->repair);
	error = sc->ops->repair(sc);
	trace_xrep_done(ip, sc->sm, error);
	switch (error) {
	case 0:
		/*
		 * Repair succeeded.  Commit the fixes and perform a second
		 * scrub so that we can tell userspace if we fixed the problem.
		 */
		sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
		sc->flags |= XREP_ALREADY_FIXED;
		return -EAGAIN;
	case -EDEADLOCK:
	case -EAGAIN:
		/* Tell the caller to try again having grabbed all the locks. */
		if (!(sc->flags & XCHK_TRY_HARDER)) {
			sc->flags |= XCHK_TRY_HARDER;
			return -EAGAIN;
		}
		/*
		 * We tried harder but still couldn't grab all the resources
		 * we needed to fix it.  The corruption has not been fixed,
		 * so report back to userspace.
		 */
		return -EFSCORRUPTED;
	default:
		return error;
	}
}
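
/*
 * A rough sketch of the retry contract from the caller's side.  The real
 * driver loop lives in scrub.c; the helper and label names used below are
 * only illustrative:
 *
 *	error = xrep_attempt(ip, sc);
 *	if (error == -EAGAIN) {
 *		error = xchk_teardown(sc, ip, 0);
 *		if (!error)
 *			goto retry_op;
 *	}
 */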
/*
 * Complain about unfixable problems in the filesystem.  We don't log
 * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver
 * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the
 * administrator isn't running xfs_scrub in no-repairs mode.
 *
 * Use this helper function because _ratelimited silently declares a static
 * structure to track rate limiting information.
 */
void
xrep_failure(
	struct xfs_mount	*mp)
{
	xfs_alert_ratelimited(mp,
 "Corruption not fixed during online repair.  Unmount and run xfs_repair.");
}
/*
 * Repair probe -- userspace uses this to probe if we're willing to repair a
 * given mountpoint.
 */
int
xrep_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	return 0;
}
/*
 * Roll a transaction, keeping the AG headers locked and reinitializing
 * the per-AG data.
 */
int
xrep_roll_ag_trans(
	struct xfs_scrub	*sc)
{
	int			error;

	/* Keep the AG header buffers locked so we can keep going. */
	if (sc->sa.agi_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	if (sc->sa.agfl_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);

	/*
	 * Roll the transaction.  We still own the buffer and the buffer lock
	 * regardless of whether or not the roll succeeds.  If the roll fails,
	 * the buffers will be released during teardown on our way out of the
	 * kernel.  If it succeeds, we join them to the new transaction and
	 * move on.
	 */
	error = xfs_trans_roll(&sc->tp);
	if (error)
		return error;

	/* Join AG headers to the new transaction. */
	if (sc->sa.agi_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
	if (sc->sa.agfl_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);

	return 0;
}
/*
 * Does the given AG have enough space to rebuild a btree?  Neither AG
 * reservation can be critical, and we must have enough space (factoring
 * in AG reservations) to construct a whole btree.
 */
bool
xrep_ag_has_space(
	struct xfs_perag	*pag,
	xfs_extlen_t		nr_blocks,
	enum xfs_ag_resv_type	type)
{
	return  !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
		!xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
		pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
}
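
/*
 * A minimal usage sketch of the predicate above; the reservation type chosen
 * here is only an example:
 *
 *	if (!xrep_ag_has_space(sc->sa.pag, nr_blocks, XFS_AG_RESV_NONE))
 *		return -ENOSPC;
 */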
/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_scrub_metadata *sm = sc->sm;
	struct xfs_perag	*pag;
	struct xfs_buf		*bp;
	xfs_agino_t		icount = NULLAGINO;
	xfs_extlen_t		aglen = NULLAGBLOCK;
	xfs_extlen_t		usedlen;
	xfs_extlen_t		freelen;
	xfs_extlen_t		bnobt_sz;
	xfs_extlen_t		inobt_sz;
	xfs_extlen_t		rmapbt_sz;
	xfs_extlen_t		refcbt_sz;
	int			error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (pag->pagi_init) {
		/* Use in-core icount if possible. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
	if (!error) {
		aglen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_length);
		freelen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}
	xfs_perag_put(pag);

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(mp, sm->sm_agno, icount)) {
		xfs_agino_t	first, last;

		xfs_agino_range(mp, sm->sm_agno, &first, &last);
		icount = last - first + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != xfs_ag_block_count(mp, sm->sm_agno) ||
	    freelen >= aglen) {
		aglen = xfs_ag_block_count(mp, sm->sm_agno);
		freelen = aglen;
		usedlen = aglen;
	}

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note that we can only rebuild the
	 * bnobt/cntbt or inobt/finobt as pairs.
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_sb_version_hassparseinodes(&mp->m_sb))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		inobt_sz *= 2;
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block.  We don't know how
		 * many rmaps there could be in the AG, so we start off with
		 * what we hope is a generous over-estimation.
		 */
		if (xfs_sb_version_hasreflink(&mp->m_sb))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
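
/*
 * A rough sketch of how this estimate is typically consumed by the scrub
 * setup path (treat the helper name as illustrative; the real call sites
 * live in the scrub setup code):
 *
 *	uint	resblks;
 *
 *	resblks = xrep_calc_ag_resblks(sc);
 *	error = xchk_trans_alloc(sc, resblks);
 */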
/* Allocate a block in an AG. */
int
xrep_alloc_ag_block(
	struct xfs_scrub		*sc,
	const struct xfs_owner_info	*oinfo,
	xfs_fsblock_t			*fsbno,
	enum xfs_ag_resv_type		resv)
{
	struct xfs_alloc_arg	args = {0};
	xfs_agblock_t		bno;
	int			error;

	switch (resv) {
	case XFS_AG_RESV_AGFL:
	case XFS_AG_RESV_RMAPBT:
		error = xfs_alloc_get_freelist(sc->tp, sc->sa.agf_bp, &bno, 1);
		if (error)
			return error;
		if (bno == NULLAGBLOCK)
			return -ENOSPC;
		xfs_extent_busy_reuse(sc->mp, sc->sa.agno, bno,
				1, false);
		*fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.agno, bno);
		if (resv == XFS_AG_RESV_RMAPBT)
			xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.agno);
		return 0;
	default:
		break;
	}

	args.tp = sc->tp;
	args.mp = sc->mp;
	args.oinfo = *oinfo;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, sc->sa.agno, 0);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_THIS_AG;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	if (args.fsbno == NULLFSBLOCK)
		return -ENOSPC;
	ASSERT(args.len == 1);
	*fsbno = args.fsbno;

	return 0;
}
/* Initialize a new AG btree root block with zero entries. */
int
xrep_init_btblock(
	struct xfs_scrub		*sc,
	xfs_fsblock_t			fsb,
	struct xfs_buf			**bpp,
	xfs_btnum_t			btnum,
	const struct xfs_buf_ops	*ops)
{
	struct xfs_trans	*tp = sc->tp;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;

	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), btnum);

	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsb),
			XFS_FSB_TO_BB(mp, 1), 0);
	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno, 0);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(tp, bp, 0, bp->b_length);
	bp->b_ops = ops;
	*bpp = bp;

	return 0;
}
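
/*
 * Putting the two helpers above together, a btree rebuilder might set up a
 * new empty root roughly like this.  The owner, btree type, and buffer ops
 * chosen below are only an example:
 *
 *	xfs_fsblock_t	fsb;
 *	struct xfs_buf	*bp;
 *
 *	error = xrep_alloc_ag_block(sc, &XFS_RMAP_OINFO_INOBT, &fsb,
 *			XFS_AG_RESV_NONE);
 *	if (error)
 *		return error;
 *	error = xrep_init_btblock(sc, fsb, &bp, XFS_BTNUM_INO,
 *			&xfs_inobt_buf_ops);
 */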
/*
 * Reconstructing per-AG Btrees
 *
 * When a space btree is corrupt, we don't bother trying to fix it.  Instead,
 * we scan secondary space metadata to derive the records that should be in
 * the damaged btree, initialize a fresh btree root, and insert the records.
 * Note that for rebuilding the rmapbt we scan all the primary data to
 * generate the new records.
 *
 * However, that leaves the matter of removing all the metadata describing the
 * old broken structure.  For primary metadata we use the rmap data to collect
 * every extent with a matching rmap owner (bitmap); we then iterate all other
 * metadata structures with the same rmap owner to collect the extents that
 * cannot be removed (sublist).  We then subtract sublist from bitmap to
 * derive the blocks that were used by the old btree.  These blocks can be
 * reaped.
 *
 * For rmapbt reconstructions we must use different tactics for extent
 * collection.  First we iterate all primary metadata (this excludes the old
 * rmapbt, obviously) to generate new rmap records.  The gaps in the rmap
 * records are collected as bitmap.  The bnobt records are collected as
 * sublist.  As with the other btrees we subtract sublist from bitmap, and the
 * result (since the rmapbt lives in the free space) are the blocks from the
 * old rmapbt.
 *
 * Disposal of Blocks from Old per-AG Btrees
 *
 * Now that we've constructed a new btree to replace the damaged one, we want
 * to dispose of the blocks that (we think) the old btree was using.
 * Previously, we used the rmapbt to collect the extents (bitmap) with the
 * rmap owner corresponding to the tree we rebuilt, collected extents for any
 * blocks with the same rmap owner that are owned by another data structure
 * (sublist), and subtracted sublist from bitmap.  In theory the extents
 * remaining in bitmap are the old btree's blocks.
 *
 * Unfortunately, it's possible that the btree was crosslinked with other
 * blocks on disk.  The rmap data can tell us if there are multiple owners, so
 * if the rmapbt says there is an owner of this block other than @oinfo, then
 * the block is crosslinked.  Remove the reverse mapping and continue.
 *
 * If there is one rmap record, we can free the block, which removes the
 * reverse mapping but doesn't add the block to the free space.  Our repair
 * strategy is to hope the other metadata objects crosslinked on this block
 * will be rebuilt (atop different blocks), thereby removing all the cross
 * links.
 *
 * If there are no rmap records at all, we also free the block.  If the btree
 * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't
 * supposed to be a rmap record and everything is ok.  For other btrees there
 * had to have been an rmap entry for the block to have ended up on @bitmap,
 * so if it's gone now there's something wrong and the fs will shut down.
 *
 * Note: If there are multiple rmap records with only the same rmap owner as
 * the btree we're trying to rebuild and the block is indeed owned by another
 * data structure with the same rmap owner, then the block will be in sublist
 * and therefore doesn't need disposal.  If there are multiple rmap records
 * with only the same rmap owner but the block is not owned by something with
 * the same rmap owner, the block will be freed.
 *
 * The caller is responsible for locking the AG headers for the entire rebuild
 * operation so that nothing else can sneak in and change the AG state while
 * we're not looking.  We also assume that the caller already invalidated any
 * buffers associated with @bitmap.
 */
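
/*
 * A rough sketch of the dispose step described above, as a hypothetical
 * btree rebuilder might code it.  The rmap owner and variable names are
 * illustrative only:
 *
 *	struct xfs_bitmap	old_blocks;
 *
 *	xfs_bitmap_init(&old_blocks);
 *	... collect the old owner's extents into &old_blocks ...
 *	... collect the extents to keep into a sublist and call
 *	    xfs_bitmap_disunion(&old_blocks, &sublist) ...
 *	error = xrep_invalidate_blocks(sc, &old_blocks);
 *	if (!error)
 *		error = xrep_reap_extents(sc, &old_blocks,
 *				&XFS_RMAP_OINFO_AG, XFS_AG_RESV_NONE);
 *	xfs_bitmap_destroy(&old_blocks);
 */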
/*
 * Invalidate buffers for per-AG btree blocks we're dumping.  This function
 * is not intended for use with file data repairs; we have bunmapi for that.
 */
int
xrep_invalidate_blocks(
	struct xfs_scrub	*sc,
	struct xfs_bitmap	*bitmap)
{
	struct xfs_bitmap_range	*bmr;
	struct xfs_bitmap_range	*n;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	/*
	 * For each block in each extent, see if there's an incore buffer for
	 * exactly that block; if so, invalidate it.  The buffer cache only
	 * lets us look for one buffer at a time, so we have to look one block
	 * at a time.  Avoid invalidating AG headers and post-EOFS blocks
	 * because we never own those; and if we can't TRYLOCK the buffer we
	 * assume it's owned by someone else.
	 */
	for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
		/* Skip AG headers and post-EOFS blocks */
		if (!xfs_verify_fsbno(sc->mp, fsbno))
			continue;
		bp = xfs_buf_incore(sc->mp->m_ddev_targp,
				XFS_FSB_TO_DADDR(sc->mp, fsbno),
				XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
		if (bp) {
			xfs_trans_bjoin(sc->tp, bp);
			xfs_trans_binval(sc->tp, bp);
		}
	}

	return 0;
}
/* Ensure the freelist is the correct size. */
int
xrep_fix_freelist(
	struct xfs_scrub	*sc,
	bool			can_shrink)
{
	struct xfs_alloc_arg	args = {0};

	args.mp = sc->mp;
	args.tp = sc->tp;
	args.agno = sc->sa.agno;
	args.alignment = 1;
	args.pag = sc->sa.pag;

	return xfs_alloc_fix_freelist(&args,
			can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
}
/*
 * Put a block back on the AGFL.
 */
STATIC int
xrep_put_freelist(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	int			error;

	/* Make sure there's space on the freelist. */
	error = xrep_fix_freelist(sc, true);
	if (error)
		return error;

	/*
	 * Since we're "freeing" a lost block onto the AGFL, we have to
	 * create an rmap for the block prior to merging it or else other
	 * parts will break.
	 */
	error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.agno, agbno, 1,
			&XFS_RMAP_OINFO_AG);
	if (error)
		return error;

	/* Put the block on the AGFL. */
	error = xfs_alloc_put_freelist(sc->tp, sc->sa.agf_bp, sc->sa.agfl_bp,
			agbno, 0);
	if (error)
		return error;
	xfs_extent_busy_insert(sc->tp, sc->sa.agno, agbno, 1,
			XFS_EXTENT_BUSY_SKIP_DISCARD);

	return 0;
}
/* Dispose of a single block. */
STATIC int
xrep_reap_block(
	struct xfs_scrub		*sc,
	xfs_fsblock_t			fsbno,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		resv)
{
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agf_bp = NULL;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	bool			has_other_rmap;
	int			error;

	agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);

	/*
	 * If we are repairing per-inode metadata, we need to read in the AGF
	 * buffer.  Otherwise, we're repairing a per-AG structure, so reuse
	 * the AGF buffer that the setup functions already grabbed.
	 */
	if (sc->ip) {
		error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf_bp);
		if (error)
			return error;
		if (!agf_bp)
			return -ENOMEM;
	} else {
		agf_bp = sc->sa.agf_bp;
	}

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno);

	/* Can we find any other rmappings? */
	error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out_free;

	/*
	 * If there are other rmappings, this block is cross linked and must
	 * not be freed.  Remove the reverse mapping and move on.  Otherwise,
	 * we were the only owner of the block, so free the extent, which will
	 * also remove the rmap.
	 *
	 * XXX: XFS doesn't support detecting the case where a single block
	 * metadata structure is crosslinked with a multi-block structure
	 * because the buffer cache doesn't detect aliasing problems, so we
	 * can't fix 100% of crosslinking problems (yet).  The verifiers will
	 * blow on writeout, the filesystem will shut down, and the admin gets
	 * complaints from users.
	 */
	if (has_other_rmap)
		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
	else if (resv == XFS_AG_RESV_AGFL)
		error = xrep_put_freelist(sc, agbno);
	else
		error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv);
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	if (error)
		return error;

	if (sc->ip)
		return xfs_trans_roll_inode(&sc->tp, sc->ip);
	return xrep_roll_ag_trans(sc);

out_free:
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	return error;
}
/* Dispose of every block of every extent in the bitmap. */
int
xrep_reap_extents(
	struct xfs_scrub		*sc,
	struct xfs_bitmap		*bitmap,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{
	struct xfs_bitmap_range	*bmr;
	struct xfs_bitmap_range	*n;
	xfs_fsblock_t		fsbno;
	int			error = 0;

	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));

	for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
		ASSERT(sc->ip != NULL ||
		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
		trace_xrep_dispose_btree_extent(sc->mp,
				XFS_FSB_TO_AGNO(sc->mp, fsbno),
				XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);

		error = xrep_reap_block(sc, fsbno, oinfo, type);
		if (error)
			goto out;
	}

out:
	xfs_bitmap_destroy(bitmap);
	return error;
}
/*
 * Finding per-AG Btree Roots for AGF/AGI Reconstruction
 *
 * If the AGF or AGI become slightly corrupted, it may be necessary to rebuild
 * the AG headers by using the rmap data to rummage through the AG looking for
 * btree roots.  This is not guaranteed to work if the AG is heavily damaged
 * or the rmap data are corrupt.
 *
 * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL
 * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
 * AGI is being rebuilt.  It must maintain these locks until it's safe for
 * other threads to change the btrees' shapes.  The caller provides
 * information about the btrees to look for by passing in an array of
 * xrep_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
 * The (root, height) fields will be set on return if anything is found.  The
 * last element of the array should have a NULL buf_ops to mark the end of the
 * array.
 *
 * For every rmapbt record matching any of the rmap owners in btree_info,
 * read each block referenced by the rmap record.  If the block is a btree
 * block from this filesystem matching any of the magic numbers and has a
 * level higher than what we've already seen, remember the block and the
 * height of the tree required to have such a block.  When the call completes,
 * we return the highest block we've found for each btree description; those
 * should be the roots.
 */
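
/*
 * A rough sketch of how an AGF/AGI rebuilder might use this facility.  The
 * rmap owner and buf_ops picked below are only an example; the real callers
 * live in the AG header repair code:
 *
 *	struct xrep_find_ag_btree fab[] = {
 *		{
 *			.rmap_owner	= XFS_RMAP_OWN_INOBT,
 *			.buf_ops	= &xfs_inobt_buf_ops,
 *		},
 *		{
 *			.buf_ops	= NULL,
 *		},
 *	};
 *
 *	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
 *	if (!error && fab[0].root != NULLAGBLOCK)
 *		... fab[0].root and fab[0].height describe the found root ...
 */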
struct xrep_findroot {
	struct xfs_scrub		*sc;
	struct xfs_buf			*agfl_bp;
	struct xfs_agf			*agf;
	struct xrep_find_ag_btree	*btree_info;
};
/* See if our block is in the AGFL. */
STATIC int
xrep_findroot_agfl_walk(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	xfs_agblock_t		*agbno = priv;

	return (*agbno == bno) ? XFS_BTREE_QUERY_RANGE_ABORT : 0;
}
/* Does this block match the btree information passed in? */
STATIC int
xrep_findroot_block(
	struct xrep_findroot		*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t			owner,
	xfs_agblock_t			agbno,
	bool				*done_with_block)
{
	struct xfs_mount		*mp = ri->sc->mp;
	struct xfs_buf			*bp;
	struct xfs_btree_block		*btblock;
	xfs_daddr_t			daddr;
	int				block_level;
	int				error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.agno, agbno);

	/*
	 * Blocks in the AGFL have stale contents that might just happen to
	 * have a matching magic and uuid.  We don't want to pull these blocks
	 * in as part of a tree root, so we have to filter out the AGFL stuff
	 * here.  If the AGFL looks insane we'll just refuse to repair.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == XFS_BTREE_QUERY_RANGE_ABORT)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the buffer into memory so that we can see if it's a match for
	 * our btree type.  We have no clue if it is beforehand, and we want to
	 * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
	 * will cause needless disk reads in subsequent calls to this function)
	 * and logging metadata verifier failures.
	 *
	 * Therefore, pass in NULL buffer ops.  If the buffer was already in
	 * memory from some other caller it will already have b_ops assigned.
	 * If it was in memory from a previous unsuccessful findroot_block
	 * call, the buffer won't have b_ops but it should be clean and ready
	 * for us to try to verify if the read call succeeds.  The same applies
	 * if the buffer wasn't in memory at all.
	 *
	 * Note: If we never match a btree type with this buffer, it will be
	 * left in memory with NULL b_ops.  This shouldn't be a problem unless
	 * the buffer gets written.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* Ensure the block magic matches the btree type we're looking for. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has ops applied and they're not the ones for
	 * this btree type, we know this block doesn't match the btree and we
	 * can bail out.
	 *
	 * If the buffer ops match ours, someone else has already validated
	 * the block for us, so we can move on to checking if this is a root
	 * block candidate.
	 *
	 * If the buffer does not have ops, nobody has successfully validated
	 * the contents and the buffer cannot be dirty.  If the magic, uuid,
	 * and structure match this btree type then we'll move on to checking
	 * if it's a root block candidate.  If there is no match, bail out.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;
		/*
		 * Read verifiers can reference b_ops, so we set the pointer
		 * here.  If the verifier fails we'll reset the buffer state
		 * to what it was before we touched the buffer.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}

		/*
		 * Some read verifiers will (re)set b_ops, so we must be
		 * careful not to change b_ops after running the verifier.
		 */
	}

	/*
	 * This block passes the magic/uuid and verifier tests for this btree
	 * type.  We don't need the caller to try the other tree types.
	 */
	*done_with_block = true;

	/*
	 * Compare this btree block's level to the height of the current
	 * candidate root block.
	 *
	 * If the level matches the root we found previously, throw away both
	 * blocks because there can't be two candidate roots.
	 *
	 * If level is lower in the tree than the root we found previously,
	 * ignore this block.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		goto out;
	}

	/*
	 * This is the highest block in the tree that we've found so far.
	 * Update the btree height to reflect what we've learned from this
	 * block.
	 */
	fab->height = block_level + 1;

	/*
	 * If this block doesn't have sibling pointers, then it's the new root
	 * block candidate.  Otherwise, the root will be found farther up the
	 * tree.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}
/*
 * Do any of the blocks in this rmap record match one of the btrees we're
 * looking for?
 */
STATIC int
xrep_findroot_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xrep_findroot		*ri = priv;
	struct xrep_find_ag_btree	*fab;
	xfs_agblock_t			b;
	bool				done;
	int				error = 0;

	/* Ignore anything that isn't AG metadata. */
	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
		return 0;

	/* Otherwise scan each block + btree type. */
	for (b = 0; b < rec->rm_blockcount; b++) {
		done = false;
		for (fab = ri->btree_info; fab->buf_ops; fab++) {
			if (rec->rm_owner != fab->rmap_owner)
				continue;
			error = xrep_findroot_block(ri, fab,
					rec->rm_owner, rec->rm_startblock + b,
					&done);
			if (error)
				return error;
			if (done)
				break;
		}
	}

	return 0;
}
/* Find the roots of the per-AG btrees described in btree_info. */
int
xrep_find_ag_btree_roots(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xrep_find_ag_btree	*btree_info,
	struct xfs_buf			*agfl_bp)
{
	struct xfs_mount		*mp = sc->mp;
	struct xrep_findroot		ri;
	struct xrep_find_ag_btree	*fab;
	struct xfs_btree_cur		*cur;
	int				error;

	ASSERT(xfs_buf_islocked(agf_bp));
	ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));

	ri.sc = sc;
	ri.btree_info = btree_info;
	ri.agf = XFS_BUF_TO_AGF(agf_bp);
	ri.agfl_bp = agfl_bp;
	for (fab = btree_info; fab->buf_ops; fab++) {
		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
		ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
		fab->root = NULLAGBLOCK;
		fab->height = 0;
	}

	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;
}
/* Force a quotacheck the next time we mount. */
void
xrep_force_quotacheck(
	struct xfs_scrub	*sc,
	uint			dqtype)
{
	uint			flag;

	flag = xfs_quota_chkd_flag(dqtype);
	if (!(flag & sc->mp->m_qflags))
		return;

	sc->mp->m_qflags &= ~flag;
	spin_lock(&sc->mp->m_sb_lock);
	sc->mp->m_sb.sb_qflags &= ~flag;
	spin_unlock(&sc->mp->m_sb_lock);
}
/*
 * Attach dquots to this inode, or schedule quotacheck to fix them.
 *
 * This function ensures that the appropriate dquots are attached to an inode.
 * We cannot allow the dquot code to allocate an on-disk dquot block here
 * because we're already in transaction context with the inode locked.  The
 * on-disk dquot should already exist anyway.  If the quota code signals
 * corruption or missing quota information, schedule quotacheck, which will
 * repair corruptions in the quota metadata.
 */
int
xrep_ino_dqattach(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xfs_qm_dqattach_locked(sc->ip, false);
	switch (error) {
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -ENOENT:
		xfs_err_ratelimited(sc->mp,
"inode %llu repair encountered quota error %d, quotacheck forced.",
				(unsigned long long)sc->ip->i_ino, error);
		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
			xrep_force_quotacheck(sc, XFS_DQ_USER);
		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
			xrep_force_quotacheck(sc, XFS_DQ_GROUP);
		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
			xrep_force_quotacheck(sc, XFS_DQ_PROJ);
		/* fall through */
	case -ESRCH:
		error = 0;
		break;
	default:
		break;
	}

	return error;
}
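
/*
 * A rough sketch of the intended call site, as an inode repair function
 * might write it (illustrative only): once the inode has been joined to the
 * repair transaction, attach the dquots before allocating any blocks so that
 * quota accounting stays correct:
 *
 *	xfs_trans_ijoin(sc->tp, sc->ip, 0);
 *	error = xrep_ino_dqattach(sc);
 *	if (error)
 *		return error;
 */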