// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, false);
}

/* Reference count btree scrubber. */
/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
 */
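/*
 * A worked example of the algorithm above (illustrative numbers only,
 * not taken from the original description): suppose the refcount record
 * is (bno=10, len=10, refcount=3), one rmap spanning blocks 0-24 covers
 * the record entirely, and the partial rmaps are B=[8,15), C=[10,18),
 * D=[15,22) and E=[18,25).  The full overlap gives $seen=1, so
 * $target_nr=2.  Step 2 pulls B and C (both start at or before block
 * 10) into the working set.  Step 3 retires B, which ends first (at
 * block 15), and pulls in D, which starts at block 15; it then retires
 * C (ends at block 18) and pulls in E, which starts at block 18.  The
 * fragment list is now empty, and the two fragments left in the working
 * set (D and E) end at blocks 22 and 25, both at or beyond block 20, so
 * a refcount of 3 is confirmed for every block in the record.
 */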
struct xchk_refcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're examining */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};
/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xchk_refcnt_check	*refchk = priv;
	struct xchk_refcnt_frag		*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
				KM_MAYFAIL);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}
/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we
	 * haven't found yet.  Pull that many off the fragment list and
	 * figure out where the smallest rmap ends (and therefore the
	 * next rmap should start).  All the rmaps we pull off should
	 * start at or before the beginning of the refcount record's
	 * range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;
	nr = 1;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno)
			goto done;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		if (nr == target_nr)
			break;
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr < target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items.  Therefore, we cannot maintain the
		 * required refcount.  Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;

done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}
/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	struct xchk_refcnt_check	refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}
/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	xfs_nlink_t		refcount)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_xref_is_not_inode_chunk(sc, agbno, len);
	xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}
/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	xfs_agblock_t		*cow_blocks = bs->private;
	xfs_agnumber_t		agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;
	bool			has_cowflag;
	int			error = 0;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

	/* Only CoW records can have refcount == 1. */
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_refcountbt_xref(bs->sc, bno, len, refcount);

	return error;
}
/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	xfs_filblks_t		cow_blocks)
{
	xfs_extlen_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_REFC, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_COW, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub	*sc)
{
	xfs_agblock_t		cow_blocks = 0;
	int			error;

	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&XFS_RMAP_OINFO_REFC, &cow_blocks);
	if (error)
		return error;

	xchk_refcount_xref_rmap(sc, cow_blocks);

	return 0;
}
/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub		*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	bool				has_cowflag;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
			agbno + XFS_REFC_COW_START, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW flag must be set, refcount must be 1. */
	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
	if (!has_cowflag || rc.rc_refcount != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
/*
 * xref check that the extent is not shared.  Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			shared;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (shared)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}