/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xfs_scrub_setup_ag_iallocbt(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	return xfs_scrub_setup_ag_btree(sc, ip, sc->try_harder);
}
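/*
 * Note: the third argument to xfs_scrub_setup_ag_btree() is a "force the
 * log" flag, so passing sc->try_harder is what implements the retry
 * described above: on a second pass the log is forced, which pushes the
 * logged inode cores to disk before we compare them with the inobt
 * (summary of the common setup helper in scrub/common.c).
 */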
/* Inode btree scrubber. */
/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xfs_scrub_iallocbt_chunk_xref_other(
	struct xfs_scrub_context	*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xfs_scrub_should_check_xref(sc, &error, pcur))
		return;
	if ((irec->ir_freecount > 0 && !has_irec) ||
	    (irec->ir_freecount == 0 && has_irec))
		xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
}
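/*
 * Put differently: ir_freecount > 0 means the other inode btree should
 * also have a record at this startino, and ir_freecount == 0 means the
 * finobt should not; a mismatch in either direction marks the
 * cross-referenced cursor corrupt.
 */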
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_iallocbt_chunk_xref(
	struct xfs_scrub_context	*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_owner_info		oinfo;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, len);
	xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
	xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, len);
}
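/*
 * Once the record itself has been flagged corrupt there is no point in
 * cross-referencing it, hence the early return above.  The remaining
 * calls assert that an inode chunk sits in allocated space, is owned by
 * XFS_RMAP_OWN_INODES in the rmapbt, and is not marked shared by the
 * refcount btree.
 */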
/* Is this chunk worth checking? */
STATIC bool
xfs_scrub_iallocbt_chunk(
	struct xfs_scrub_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}
/* Count the number of free inodes. */
static unsigned int
xfs_scrub_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}
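/*
 * ir_free has one bit per slot of a 64-inode chunk, so hweight64() of
 * the mask is the number of free slots.  In a sparse chunk the slots
 * that were never allocated are also marked free in ir_free, which is
 * why the record scrubber below compares this count against
 * ir_freecount + (XFS_INODES_PER_CHUNK - ir_count) rather than against
 * ir_freecount alone.
 */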
/* Check a particular inode with ir_free. */
STATIC int
xfs_scrub_iallocbt_check_cluster_freemask(
	struct xfs_scrub_btree		*bs,
	xfs_ino_t			fsino,
	xfs_agino_t			chunkino,
	xfs_agino_t			clusterino,
	struct xfs_inobt_rec_incore	*irec,
	struct xfs_buf			*bp)
{
	struct xfs_dinode		*dip;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	bool				inode_is_free = false;
	bool				freemask_ok;
	bool				inuse;
	int				error = 0;

	if (xfs_scrub_should_terminate(bs->sc, &error))
		return error;

	dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 &&
	     be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	if (irec->ir_free & XFS_INOBT_MASK(chunkino + clusterino))
		inode_is_free = true;
	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp,
			fsino + clusterino, &inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = inode_is_free ^ !!(dip->di_mode);
		if (!bs->sc->try_harder && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = inode_is_free ^ inuse;
	}
	if (!freemask_ok)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}
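/*
 * freemask_ok reads as "exactly one of these is true": a slot marked
 * free in ir_free must correspond to an inode that is not in use, and
 * an allocated slot to one that is.  When the inode isn't in the cache
 * we fall back to di_mode from the cluster buffer (a freed inode has
 * di_mode == 0), but that on-disk copy can be stale until the log is
 * forced, so the first pass returns -EDEADLOCK and lets the try_harder
 * retry redo the comparison.
 */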
/* Make sure the free mask is consistent with what the inodes think. */
STATIC int
xfs_scrub_iallocbt_check_freemask(
	struct xfs_scrub_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_owner_info		oinfo;
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_dinode		*dip;
	struct xfs_buf			*bp;
	xfs_ino_t			fsino;
	xfs_agino_t			nr_inodes;
	xfs_agino_t			agino;
	xfs_agino_t			chunkino;
	xfs_agino_t			clusterino;
	xfs_agblock_t			agbno;
	int				blks_per_cluster;
	uint16_t			holemask;
	uint16_t			ir_holemask;
	int				error = 0;

	/* Make sure the freemask matches the inode records. */
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	nr_inodes = XFS_OFFBNO_TO_AGINO(mp, blks_per_cluster, 0);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);

	for (agino = irec->ir_startino;
	     agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
	     agino += blks_per_cluster * mp->m_sb.sb_inopblock) {
		fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
		chunkino = agino - irec->ir_startino;
		agbno = XFS_AGINO_TO_AGBNO(mp, agino);

		/* Compute the holemask mask for this cluster. */
		for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
		     clusterino += XFS_INODES_PER_HOLEMASK_BIT)
			holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
					XFS_INODES_PER_HOLEMASK_BIT);

		/* The whole cluster must be a hole or not a hole. */
		ir_holemask = (irec->ir_holemask & holemask);
		if (ir_holemask != holemask && ir_holemask != 0) {
			xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
			continue;
		}

		/* If any part of this is a hole, skip it. */
		if (ir_holemask) {
			xfs_scrub_xref_is_not_owned_by(bs->sc, agbno,
					blks_per_cluster, &oinfo);
			continue;
		}

		xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
				&oinfo);

		/* Grab the inode cluster buffer. */
		imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
				agbno);
		imap.im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
		imap.im_boffset = 0;

		error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
				&dip, &bp, 0, 0);
		if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, 0, &error))
			continue;

		/* Which inodes are free? */
		for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
			error = xfs_scrub_iallocbt_check_cluster_freemask(bs,
					fsino, chunkino, clusterino, irec, bp);
			if (error) {
				xfs_trans_brelse(bs->cur->bc_tp, bp);
				goto out;
			}
		}

		xfs_trans_brelse(bs->cur->bc_tp, bp);
	}

out:
	return error;
}
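/*
 * Each pass of the outer loop above covers one inode cluster
 * (blks_per_cluster * sb_inopblock inodes).  A holemask bit covers
 * XFS_INODES_PER_HOLEMASK_BIT inodes (a 64-inode chunk over a 16-bit
 * holemask is 4 inodes per bit), so a cluster maps to one or more
 * holemask bits, which must be either all set (a hole; the blocks must
 * not be rmap-owned by inodes) or all clear (real inodes; read the
 * cluster buffer and check every slot against ir_free).
 */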
/* Scrub an inobt/finobt record. */
STATIC int
xfs_scrub_iallocbt_rec(
	struct xfs_scrub_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_filblks_t			*inode_blocks = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agino_t			agino;
	xfs_agblock_t			agbno;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xfs_scrub_iallocbt_freecount(irec.ir_free))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(mp, agno, agino) ||
	    !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	/* Make sure this record is aligned to cluster and inoalignment size. */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
	if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
	    (agbno & (xfs_icluster_size_fsb(mp) - 1)))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	*inode_blocks += XFS_B_TO_FSB(mp,
			irec.ir_count * mp->m_sb.sb_inodesize);

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_freemask;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

check_freemask:
	error = xfs_scrub_iallocbt_check_freemask(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}
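/*
 * Sparse record example: with 64 inodes per chunk and 4 inodes per
 * holemask bit, a record with ir_count == 48 should have exactly four
 * holemask bits set (4 * 4 == 16 missing inodes), so the loop ends with
 * holecount == 16 and holecount + ir_count == XFS_INODES_PER_CHUNK.  A
 * record claiming ir_count == 32 with only four hole bits set would
 * fail the final check and be flagged corrupt.
 */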
/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xfs_scrub_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub_context	*sc,
	int				which)
{
	struct xfs_owner_info		oinfo;
	xfs_filblks_t			blocks;
	xfs_extlen_t			inobt_blocks = 0;
	xfs_extlen_t			finobt_blocks = 0;
	int				error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xfs_scrub_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xfs_scrub_process_error(sc, 0, 0, &error))
			return;
	}

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
			&blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}
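/*
 * XFS_RMAP_OWN_INOBT covers the blocks of both the inobt and the
 * finobt, which is why the rmap-owned total is compared against the sum
 * of the two btree block counts rather than either count alone.
 */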
/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xfs_scrub_iallocbt_xref_rmap_inodes(
	struct xfs_scrub_context	*sc,
	int				which,
	xfs_filblks_t			inode_blocks)
{
	struct xfs_owner_info		oinfo;
	xfs_filblks_t			blocks;
	int				error;

	if (!sc->sa.rmap_cur)
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
			&blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inode_blocks)
		xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}
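/*
 * inode_blocks is accumulated by xfs_scrub_iallocbt_rec() as it walks
 * the records (XFS_B_TO_FSB of ir_count inodes per record), so this
 * comparison is only meaningful for the inobt walk; see the caller
 * below.
 */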
/* Scrub the inode btrees for some AG. */
STATIC int
xfs_scrub_iallocbt(
	struct xfs_scrub_context	*sc,
	xfs_btnum_t			which)
{
	struct xfs_btree_cur		*cur;
	struct xfs_owner_info		oinfo;
	xfs_filblks_t			inode_blocks = 0;
	int				error;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo,
			&inode_blocks);
	if (error)
		return error;

	xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);

	return error;
}
int
xfs_scrub_inobt(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_iallocbt(sc, XFS_BTNUM_INO);
}

int
xfs_scrub_finobt(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO);
}
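/*
 * xfs_scrub_inobt() and xfs_scrub_finobt() are the entry points called
 * from the scrub dispatch table in scrub/scrub.c; both funnel into
 * xfs_scrub_iallocbt() above, with the btree number selecting which
 * cursor gets walked.
 */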
/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xfs_scrub_xref_inode_check(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len,
	struct xfs_btree_cur		**icur,
	bool				should_have_inodes)
{
	bool				has_inodes;
	int				error;

	if (!(*icur))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xfs_scrub_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0);
}
/* xref check that the extent is not covered by inodes */
void
xfs_scrub_xref_is_not_inode_chunk(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}
/* xref check that the extent is covered by inodes */
void
xfs_scrub_xref_is_inode_chunk(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}
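/*
 * Note the asymmetry: "is an inode chunk" only consults the inobt,
 * because the finobt only tracks chunks that still have free inodes, so
 * an allocated chunk may legitimately be absent from it; "is not an
 * inode chunk" can safely check both btrees.
 */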