/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
/*
 * Walk all the blocks in the AGFL.  The fn function can return any negative
 * error code or XFS_BTREE_QUERY_RANGE_ABORT.
 */
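/*
 * The AGFL is a ring buffer of block numbers: agf_flfirst and agf_fllast
 * are slot indices into the on-disk array, and the active part of the list
 * may wrap past the end.  For example (hypothetical numbers), with
 * XFS_AGFL_SIZE() == 119, flfirst == 116, and fllast == 2, the walk visits
 * slots 116..118 and then 0..2.
 */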
int
xfs_scrub_walk_agfl(
	struct xfs_scrub_context	*sc,
	int				(*fn)(struct xfs_scrub_context *,
					      xfs_agblock_t bno, void *),
	void				*priv)
{
	struct xfs_agf			*agf;
	__be32				*agfl_bno;
	struct xfs_mount		*mp = sc->mp;
	unsigned int			flfirst;
	unsigned int			fllast;
	int				i;
	int				error;

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, sc->sa.agfl_bp);
	flfirst = be32_to_cpu(agf->agf_flfirst);
	fllast = be32_to_cpu(agf->agf_fllast);

	/* Nothing to walk in an empty AGFL. */
	if (agf->agf_flcount == cpu_to_be32(0))
		return 0;

	/* first to last is a consecutive list. */
	if (fllast >= flfirst) {
		for (i = flfirst; i <= fllast; i++) {
			error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
			if (error)
				return error;
			if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
				return error;
		}

		return 0;
	}

	/* first to the end */
	for (i = flfirst; i < XFS_AGFL_SIZE(mp); i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	/* the start to last. */
	for (i = 0; i <= fllast; i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	return 0;
}
/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_superblock_xref(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agnumber_t			agno = sc->sm->sm_agno;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xfs_scrub_ag_init(sc, agno, &sc->sa);
	if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}
/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
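/*
 * Userspace reaches this function through the XFS_IOC_SCRUB_METADATA
 * ioctl; for example (exact syntax may vary by xfsprogs version), xfs_io's
 * scrub command can check backup superblock 1:
 *
 *	xfs_io -c 'scrub sb 1' /mnt
 */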
int
xfs_scrub_superblock(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;
	struct xfs_dsb			*sb;
	xfs_agnumber_t			agno;
	uint32_t			v2_ok;
	__be32				features_mask;
	int				error;
	__be16				vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;
	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
		  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
		  XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
	default:
		break;
	}
	if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);
	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
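	/*
	 * Terminology note: "corrupt" sets XFS_SCRUB_OFLAG_CORRUPT, meaning
	 * the field must match and a mismatch is damage; "preen" sets
	 * XFS_SCRUB_OFLAG_PREEN, meaning the mismatch is harmless but a
	 * repair tool could freshen this backup superblock.
	 */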
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xfs_scrub_block_set_corrupt(sc, bp);
	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xfs_scrub_block_set_preen(sc, bp);
	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */
	if (sb->sb_flags != mp->m_sb.sb_flags)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xfs_scrub_block_set_corrupt(sc, bp);
	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xfs_scrub_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xfs_scrub_block_set_preen(sc, bp);
	}
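	/*
	 * Historical note: sb_bad_features2 exists because a long-fixed
	 * packing bug caused some old kernels to read sb_features2 at the
	 * wrong offset; both copies are kept in sync on disk, which is why
	 * a mismatch above is only preen-worthy, not corruption.
	 */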
	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);
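	/*
	 * memchr_inv(), used twice below, returns the address of the first
	 * byte that does not match the given pattern (zero here), or NULL
	 * if the whole range matches; any non-NULL result therefore means a
	 * stray nonzero byte.
	 */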
	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);
		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xfs_scrub_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xfs_scrub_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}
	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xfs_scrub_block_set_corrupt(sc, bp);
	}
	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xfs_scrub_block_set_corrupt(sc, bp);
	xfs_scrub_superblock_xref(sc, bp);

	return error;
}
/* AGF */

/* Tally freespace record lengths. */
STATIC int
xfs_scrub_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}
/* Check agf_freeblks */
static inline void
xfs_scrub_agf_xref_freeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t			blocks = 0;
	int				error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xfs_scrub_agf_record_bno_lengths, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xfs_scrub_agf_xref_cntbt(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			agbno;
	xfs_extlen_t			blocks;
	int				have;
	int				error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
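/*
 * Note on the "- 1" adjustments below: agf_btreeblks counts the blocks
 * held by the free space btrees (and the rmapbt, if present) beyond each
 * btree's root block, so the root is subtracted from every
 * xfs_btree_count_blocks() result before comparing.
 */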
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xfs_scrub_agf_xref_btreeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			blocks;
	xfs_agblock_t			btreeblks;
	int				error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check agf_refcount_blocks against tree size */
static inline void
xfs_scrub_agf_xref_refcblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			blocks;
	int				error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agf_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_agf_xref_freeblks(sc);
	xfs_scrub_agf_xref_cntbt(sc);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_agf_xref_btreeblks(sc);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);
	xfs_scrub_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGF. */
int
xfs_scrub_agf(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agblock_t			agfl_first;
	xfs_agblock_t			agfl_last;
	xfs_agblock_t			agfl_count;
	xfs_agblock_t			fl_count;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = XFS_AGFL_SIZE(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
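	/*
	 * Worked example (hypothetical numbers): with XFS_AGFL_SIZE() == 119,
	 * agfl_first == 116, and agfl_last == 2, the list wraps, so the else
	 * branch computes fl_count = 119 - 116 + 2 + 1 = 6 occupied slots.
	 */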
	xfs_scrub_agf_xref(sc);
out:
	return error;
}
/* AGFL */

struct xfs_scrub_agfl_info {
	struct xfs_owner_info		oinfo;
	unsigned int			sz_entries;
	unsigned int			nr_entries;
	xfs_agblock_t			*entries;
};
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_block_xref(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	struct xfs_owner_info		*oinfo)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);
}
/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	void				*priv)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_agfl_info	*sai = priv;
	xfs_agnumber_t			agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_block_xref(sc, agbno, priv);

	return 0;
}
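/*
 * Comparator for the kernel's sort() (a heapsort, so no stability
 * assumptions); it must return a negative, zero, or positive value as *pa
 * sorts before, equal to, or after *pb.
 */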
static int
xfs_scrub_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}
/* Scrub the AGFL. */
int
xfs_scrub_agfl(
	struct xfs_scrub_context	*sc)
{
	struct xfs_scrub_agfl_info	sai = { 0 };
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	unsigned int			agflcount;
	unsigned int			i;
	int				error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
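	/*
	 * This allocation is small and bounded: the AGFL occupies at most
	 * one sector, so XFS_AGFL_SIZE() is on the order of a hundred
	 * 32-bit entries (for example, 119 on a v5 filesystem with 512-byte
	 * sectors), making a flat array cheap.
	 */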
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > XFS_AGFL_SIZE(sc->mp)) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
	error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xfs_scrub_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}
/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xfs_scrub_agi_xref_icounts(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agi			*agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
	xfs_agino_t			icount;
	xfs_agino_t			freecount;
	int				error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agi_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_scrub_agi_xref_icounts(sc);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGI. */
int
xfs_scrub_agi(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agi			*agi;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agino_t			agino;
	xfs_agino_t			first_agino;
	xfs_agino_t			last_agino;
	xfs_agino_t			icount;
	int				i;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
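	/*
	 * agi_unlinked[] is a hash table of XFS_AGI_UNLINKED_BUCKETS list
	 * heads chaining inodes that have been unlinked but are still open;
	 * NULLAGINO marks an empty bucket, so only non-null heads need to
	 * point at a plausible inode.
	 */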
	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	xfs_scrub_agi_xref(sc);
out:
	return error;
}