fs/xfs/xfs_health.c — XFS metadata health tracking (sickness/checked state for
the filesystem, allocation groups, realtime groups, and inodes).
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2019 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_trace.h"
15 #include "xfs_health.h"
16 #include "xfs_ag.h"
17 #include "xfs_btree.h"
18 #include "xfs_da_format.h"
19 #include "xfs_da_btree.h"
20 #include "xfs_quota_defs.h"
21 #include "xfs_rtgroup.h"
23 static void
24 xfs_health_unmount_group(
25 struct xfs_group *xg,
26 bool *warn)
28 unsigned int sick = 0;
29 unsigned int checked = 0;
31 xfs_group_measure_sickness(xg, &sick, &checked);
32 if (sick) {
33 trace_xfs_group_unfixed_corruption(xg, sick);
34 *warn = true;
39 * Warn about metadata corruption that we detected but haven't fixed, and
40 * make sure we're not sitting on anything that would get in the way of
41 * recovery.
43 void
44 xfs_health_unmount(
45 struct xfs_mount *mp)
47 struct xfs_perag *pag = NULL;
48 struct xfs_rtgroup *rtg = NULL;
49 unsigned int sick = 0;
50 unsigned int checked = 0;
51 bool warn = false;
53 if (xfs_is_shutdown(mp))
54 return;
56 /* Measure AG corruption levels. */
57 while ((pag = xfs_perag_next(mp, pag)))
58 xfs_health_unmount_group(pag_group(pag), &warn);
60 /* Measure realtime group corruption levels. */
61 while ((rtg = xfs_rtgroup_next(mp, rtg)))
62 xfs_health_unmount_group(rtg_group(rtg), &warn);
65 * Measure fs corruption and keep the sample around for the warning.
66 * See the note below for why we exempt FS_COUNTERS.
68 xfs_fs_measure_sickness(mp, &sick, &checked);
69 if (sick & ~XFS_SICK_FS_COUNTERS) {
70 trace_xfs_fs_unfixed_corruption(mp, sick);
71 warn = true;
74 if (warn) {
75 xfs_warn(mp,
76 "Uncorrected metadata errors detected; please run xfs_repair.");
79 * We discovered uncorrected metadata problems at some point
80 * during this filesystem mount and have advised the
81 * administrator to run repair once the unmount completes.
83 * However, we must be careful -- when FSCOUNTERS are flagged
84 * unhealthy, the unmount procedure omits writing the clean
85 * unmount record to the log so that the next mount will run
86 * recovery and recompute the summary counters. In other
87 * words, we leave a dirty log to get the counters fixed.
89 * Unfortunately, xfs_repair cannot recover dirty logs, so if
90 * there were filesystem problems, FSCOUNTERS was flagged, and
91 * the administrator takes our advice to run xfs_repair,
92 * they'll have to zap the log before repairing structures.
93 * We don't really want to encourage this, so we mark the
94 * FSCOUNTERS healthy so that a subsequent repair run won't see
95 * a dirty log.
97 if (sick & XFS_SICK_FS_COUNTERS)
98 xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
102 /* Mark unhealthy per-fs metadata. */
103 void
104 xfs_fs_mark_sick(
105 struct xfs_mount *mp,
106 unsigned int mask)
108 ASSERT(!(mask & ~XFS_SICK_FS_ALL));
109 trace_xfs_fs_mark_sick(mp, mask);
111 spin_lock(&mp->m_sb_lock);
112 mp->m_fs_sick |= mask;
113 spin_unlock(&mp->m_sb_lock);
116 /* Mark per-fs metadata as having been checked and found unhealthy by fsck. */
117 void
118 xfs_fs_mark_corrupt(
119 struct xfs_mount *mp,
120 unsigned int mask)
122 ASSERT(!(mask & ~XFS_SICK_FS_ALL));
123 trace_xfs_fs_mark_corrupt(mp, mask);
125 spin_lock(&mp->m_sb_lock);
126 mp->m_fs_sick |= mask;
127 mp->m_fs_checked |= mask;
128 spin_unlock(&mp->m_sb_lock);
131 /* Mark a per-fs metadata healed. */
132 void
133 xfs_fs_mark_healthy(
134 struct xfs_mount *mp,
135 unsigned int mask)
137 ASSERT(!(mask & ~XFS_SICK_FS_ALL));
138 trace_xfs_fs_mark_healthy(mp, mask);
140 spin_lock(&mp->m_sb_lock);
141 mp->m_fs_sick &= ~mask;
142 if (!(mp->m_fs_sick & XFS_SICK_FS_PRIMARY))
143 mp->m_fs_sick &= ~XFS_SICK_FS_SECONDARY;
144 mp->m_fs_checked |= mask;
145 spin_unlock(&mp->m_sb_lock);
148 /* Sample which per-fs metadata are unhealthy. */
149 void
150 xfs_fs_measure_sickness(
151 struct xfs_mount *mp,
152 unsigned int *sick,
153 unsigned int *checked)
155 spin_lock(&mp->m_sb_lock);
156 *sick = mp->m_fs_sick;
157 *checked = mp->m_fs_checked;
158 spin_unlock(&mp->m_sb_lock);
161 /* Mark unhealthy per-ag metadata given a raw AG number. */
162 void
163 xfs_agno_mark_sick(
164 struct xfs_mount *mp,
165 xfs_agnumber_t agno,
166 unsigned int mask)
168 struct xfs_perag *pag = xfs_perag_get(mp, agno);
170 /* per-ag structure not set up yet? */
171 if (!pag)
172 return;
174 xfs_ag_mark_sick(pag, mask);
175 xfs_perag_put(pag);
178 static inline void
179 xfs_group_check_mask(
180 struct xfs_group *xg,
181 unsigned int mask)
183 if (xg->xg_type == XG_TYPE_AG)
184 ASSERT(!(mask & ~XFS_SICK_AG_ALL));
185 else
186 ASSERT(!(mask & ~XFS_SICK_RG_ALL));
189 /* Mark unhealthy per-ag metadata. */
190 void
191 xfs_group_mark_sick(
192 struct xfs_group *xg,
193 unsigned int mask)
195 xfs_group_check_mask(xg, mask);
196 trace_xfs_group_mark_sick(xg, mask);
198 spin_lock(&xg->xg_state_lock);
199 xg->xg_sick |= mask;
200 spin_unlock(&xg->xg_state_lock);
204 * Mark per-group metadata as having been checked and found unhealthy by fsck.
206 void
207 xfs_group_mark_corrupt(
208 struct xfs_group *xg,
209 unsigned int mask)
211 xfs_group_check_mask(xg, mask);
212 trace_xfs_group_mark_corrupt(xg, mask);
214 spin_lock(&xg->xg_state_lock);
215 xg->xg_sick |= mask;
216 xg->xg_checked |= mask;
217 spin_unlock(&xg->xg_state_lock);
221 * Mark per-group metadata ok.
223 void
224 xfs_group_mark_healthy(
225 struct xfs_group *xg,
226 unsigned int mask)
228 xfs_group_check_mask(xg, mask);
229 trace_xfs_group_mark_healthy(xg, mask);
231 spin_lock(&xg->xg_state_lock);
232 xg->xg_sick &= ~mask;
233 if (!(xg->xg_sick & XFS_SICK_AG_PRIMARY))
234 xg->xg_sick &= ~XFS_SICK_AG_SECONDARY;
235 xg->xg_checked |= mask;
236 spin_unlock(&xg->xg_state_lock);
239 /* Sample which per-ag metadata are unhealthy. */
240 void
241 xfs_group_measure_sickness(
242 struct xfs_group *xg,
243 unsigned int *sick,
244 unsigned int *checked)
246 spin_lock(&xg->xg_state_lock);
247 *sick = xg->xg_sick;
248 *checked = xg->xg_checked;
249 spin_unlock(&xg->xg_state_lock);
252 /* Mark unhealthy per-rtgroup metadata given a raw rt group number. */
253 void
254 xfs_rgno_mark_sick(
255 struct xfs_mount *mp,
256 xfs_rgnumber_t rgno,
257 unsigned int mask)
259 struct xfs_rtgroup *rtg = xfs_rtgroup_get(mp, rgno);
261 /* per-rtgroup structure not set up yet? */
262 if (!rtg)
263 return;
265 xfs_group_mark_sick(rtg_group(rtg), mask);
266 xfs_rtgroup_put(rtg);
269 /* Mark the unhealthy parts of an inode. */
270 void
271 xfs_inode_mark_sick(
272 struct xfs_inode *ip,
273 unsigned int mask)
275 ASSERT(!(mask & ~XFS_SICK_INO_ALL));
276 trace_xfs_inode_mark_sick(ip, mask);
278 spin_lock(&ip->i_flags_lock);
279 ip->i_sick |= mask;
280 spin_unlock(&ip->i_flags_lock);
283 * Keep this inode around so we don't lose the sickness report. Scrub
284 * grabs inodes with DONTCACHE assuming that most inode are ok, which
285 * is not the case here.
287 spin_lock(&VFS_I(ip)->i_lock);
288 VFS_I(ip)->i_state &= ~I_DONTCACHE;
289 spin_unlock(&VFS_I(ip)->i_lock);
292 /* Mark inode metadata as having been checked and found unhealthy by fsck. */
293 void
294 xfs_inode_mark_corrupt(
295 struct xfs_inode *ip,
296 unsigned int mask)
298 ASSERT(!(mask & ~XFS_SICK_INO_ALL));
299 trace_xfs_inode_mark_corrupt(ip, mask);
301 spin_lock(&ip->i_flags_lock);
302 ip->i_sick |= mask;
303 ip->i_checked |= mask;
304 spin_unlock(&ip->i_flags_lock);
307 * Keep this inode around so we don't lose the sickness report. Scrub
308 * grabs inodes with DONTCACHE assuming that most inode are ok, which
309 * is not the case here.
311 spin_lock(&VFS_I(ip)->i_lock);
312 VFS_I(ip)->i_state &= ~I_DONTCACHE;
313 spin_unlock(&VFS_I(ip)->i_lock);
316 /* Mark parts of an inode healed. */
317 void
318 xfs_inode_mark_healthy(
319 struct xfs_inode *ip,
320 unsigned int mask)
322 ASSERT(!(mask & ~XFS_SICK_INO_ALL));
323 trace_xfs_inode_mark_healthy(ip, mask);
325 spin_lock(&ip->i_flags_lock);
326 ip->i_sick &= ~mask;
327 if (!(ip->i_sick & XFS_SICK_INO_PRIMARY))
328 ip->i_sick &= ~XFS_SICK_INO_SECONDARY;
329 ip->i_checked |= mask;
330 spin_unlock(&ip->i_flags_lock);
333 /* Sample which parts of an inode are unhealthy. */
334 void
335 xfs_inode_measure_sickness(
336 struct xfs_inode *ip,
337 unsigned int *sick,
338 unsigned int *checked)
340 spin_lock(&ip->i_flags_lock);
341 *sick = ip->i_sick;
342 *checked = ip->i_checked;
343 spin_unlock(&ip->i_flags_lock);
/* Mappings between internal sick masks and ioctl sick masks. */

struct ioctl_sick_map {
	unsigned int		sick_mask;	/* internal XFS_SICK_* bit */
	unsigned int		ioctl_mask;	/* corresponding ioctl bit */
};

/* Iterate @m over every entry of the mapping table @map. */
#define for_each_sick_map(map, m) \
	for ((m) = (map); (m) < (map) + ARRAY_SIZE(map); (m)++)
356 static const struct ioctl_sick_map fs_map[] = {
357 { XFS_SICK_FS_COUNTERS, XFS_FSOP_GEOM_SICK_COUNTERS},
358 { XFS_SICK_FS_UQUOTA, XFS_FSOP_GEOM_SICK_UQUOTA },
359 { XFS_SICK_FS_GQUOTA, XFS_FSOP_GEOM_SICK_GQUOTA },
360 { XFS_SICK_FS_PQUOTA, XFS_FSOP_GEOM_SICK_PQUOTA },
361 { XFS_SICK_FS_QUOTACHECK, XFS_FSOP_GEOM_SICK_QUOTACHECK },
362 { XFS_SICK_FS_NLINKS, XFS_FSOP_GEOM_SICK_NLINKS },
363 { XFS_SICK_FS_METADIR, XFS_FSOP_GEOM_SICK_METADIR },
364 { XFS_SICK_FS_METAPATH, XFS_FSOP_GEOM_SICK_METAPATH },
367 static const struct ioctl_sick_map rt_map[] = {
368 { XFS_SICK_RG_BITMAP, XFS_FSOP_GEOM_SICK_RT_BITMAP },
369 { XFS_SICK_RG_SUMMARY, XFS_FSOP_GEOM_SICK_RT_SUMMARY },
372 static inline void
373 xfgeo_health_tick(
374 struct xfs_fsop_geom *geo,
375 unsigned int sick,
376 unsigned int checked,
377 const struct ioctl_sick_map *m)
379 if (checked & m->sick_mask)
380 geo->checked |= m->ioctl_mask;
381 if (sick & m->sick_mask)
382 geo->sick |= m->ioctl_mask;
385 /* Fill out fs geometry health info. */
386 void
387 xfs_fsop_geom_health(
388 struct xfs_mount *mp,
389 struct xfs_fsop_geom *geo)
391 struct xfs_rtgroup *rtg = NULL;
392 const struct ioctl_sick_map *m;
393 unsigned int sick;
394 unsigned int checked;
396 geo->sick = 0;
397 geo->checked = 0;
399 xfs_fs_measure_sickness(mp, &sick, &checked);
400 for_each_sick_map(fs_map, m)
401 xfgeo_health_tick(geo, sick, checked, m);
403 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
404 xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked);
405 for_each_sick_map(rt_map, m)
406 xfgeo_health_tick(geo, sick, checked, m);
410 static const struct ioctl_sick_map ag_map[] = {
411 { XFS_SICK_AG_SB, XFS_AG_GEOM_SICK_SB },
412 { XFS_SICK_AG_AGF, XFS_AG_GEOM_SICK_AGF },
413 { XFS_SICK_AG_AGFL, XFS_AG_GEOM_SICK_AGFL },
414 { XFS_SICK_AG_AGI, XFS_AG_GEOM_SICK_AGI },
415 { XFS_SICK_AG_BNOBT, XFS_AG_GEOM_SICK_BNOBT },
416 { XFS_SICK_AG_CNTBT, XFS_AG_GEOM_SICK_CNTBT },
417 { XFS_SICK_AG_INOBT, XFS_AG_GEOM_SICK_INOBT },
418 { XFS_SICK_AG_FINOBT, XFS_AG_GEOM_SICK_FINOBT },
419 { XFS_SICK_AG_RMAPBT, XFS_AG_GEOM_SICK_RMAPBT },
420 { XFS_SICK_AG_REFCNTBT, XFS_AG_GEOM_SICK_REFCNTBT },
421 { XFS_SICK_AG_INODES, XFS_AG_GEOM_SICK_INODES },
424 /* Fill out ag geometry health info. */
425 void
426 xfs_ag_geom_health(
427 struct xfs_perag *pag,
428 struct xfs_ag_geometry *ageo)
430 const struct ioctl_sick_map *m;
431 unsigned int sick;
432 unsigned int checked;
434 ageo->ag_sick = 0;
435 ageo->ag_checked = 0;
437 xfs_group_measure_sickness(pag_group(pag), &sick, &checked);
438 for_each_sick_map(ag_map, m) {
439 if (checked & m->sick_mask)
440 ageo->ag_checked |= m->ioctl_mask;
441 if (sick & m->sick_mask)
442 ageo->ag_sick |= m->ioctl_mask;
446 static const struct ioctl_sick_map rtgroup_map[] = {
447 { XFS_SICK_RG_SUPER, XFS_RTGROUP_GEOM_SICK_SUPER },
448 { XFS_SICK_RG_BITMAP, XFS_RTGROUP_GEOM_SICK_BITMAP },
449 { XFS_SICK_RG_SUMMARY, XFS_RTGROUP_GEOM_SICK_SUMMARY },
452 /* Fill out rtgroup geometry health info. */
453 void
454 xfs_rtgroup_geom_health(
455 struct xfs_rtgroup *rtg,
456 struct xfs_rtgroup_geometry *rgeo)
458 const struct ioctl_sick_map *m;
459 unsigned int sick;
460 unsigned int checked;
462 rgeo->rg_sick = 0;
463 rgeo->rg_checked = 0;
465 xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked);
466 for_each_sick_map(rtgroup_map, m) {
467 if (checked & m->sick_mask)
468 rgeo->rg_checked |= m->ioctl_mask;
469 if (sick & m->sick_mask)
470 rgeo->rg_sick |= m->ioctl_mask;
474 static const struct ioctl_sick_map ino_map[] = {
475 { XFS_SICK_INO_CORE, XFS_BS_SICK_INODE },
476 { XFS_SICK_INO_BMBTD, XFS_BS_SICK_BMBTD },
477 { XFS_SICK_INO_BMBTA, XFS_BS_SICK_BMBTA },
478 { XFS_SICK_INO_BMBTC, XFS_BS_SICK_BMBTC },
479 { XFS_SICK_INO_DIR, XFS_BS_SICK_DIR },
480 { XFS_SICK_INO_XATTR, XFS_BS_SICK_XATTR },
481 { XFS_SICK_INO_SYMLINK, XFS_BS_SICK_SYMLINK },
482 { XFS_SICK_INO_PARENT, XFS_BS_SICK_PARENT },
483 { XFS_SICK_INO_BMBTD_ZAPPED, XFS_BS_SICK_BMBTD },
484 { XFS_SICK_INO_BMBTA_ZAPPED, XFS_BS_SICK_BMBTA },
485 { XFS_SICK_INO_DIR_ZAPPED, XFS_BS_SICK_DIR },
486 { XFS_SICK_INO_SYMLINK_ZAPPED, XFS_BS_SICK_SYMLINK },
487 { XFS_SICK_INO_DIRTREE, XFS_BS_SICK_DIRTREE },
490 /* Fill out bulkstat health info. */
491 void
492 xfs_bulkstat_health(
493 struct xfs_inode *ip,
494 struct xfs_bulkstat *bs)
496 const struct ioctl_sick_map *m;
497 unsigned int sick;
498 unsigned int checked;
500 bs->bs_sick = 0;
501 bs->bs_checked = 0;
503 xfs_inode_measure_sickness(ip, &sick, &checked);
504 for_each_sick_map(ino_map, m) {
505 if (checked & m->sick_mask)
506 bs->bs_checked |= m->ioctl_mask;
507 if (sick & m->sick_mask)
508 bs->bs_sick |= m->ioctl_mask;
512 /* Mark a block mapping sick. */
513 void
514 xfs_bmap_mark_sick(
515 struct xfs_inode *ip,
516 int whichfork)
518 unsigned int mask;
520 switch (whichfork) {
521 case XFS_DATA_FORK:
522 mask = XFS_SICK_INO_BMBTD;
523 break;
524 case XFS_ATTR_FORK:
525 mask = XFS_SICK_INO_BMBTA;
526 break;
527 case XFS_COW_FORK:
528 mask = XFS_SICK_INO_BMBTC;
529 break;
530 default:
531 ASSERT(0);
532 return;
535 xfs_inode_mark_sick(ip, mask);
538 /* Record observations of btree corruption with the health tracking system. */
539 void
540 xfs_btree_mark_sick(
541 struct xfs_btree_cur *cur)
543 if (xfs_btree_is_bmap(cur->bc_ops)) {
544 xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork);
545 /* no health state tracking for ephemeral btrees */
546 } else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) {
547 ASSERT(cur->bc_group);
548 ASSERT(cur->bc_ops->sick_mask);
549 xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask);
554 * Record observations of dir/attr btree corruption with the health tracking
555 * system.
557 void
558 xfs_dirattr_mark_sick(
559 struct xfs_inode *ip,
560 int whichfork)
562 unsigned int mask;
564 switch (whichfork) {
565 case XFS_DATA_FORK:
566 mask = XFS_SICK_INO_DIR;
567 break;
568 case XFS_ATTR_FORK:
569 mask = XFS_SICK_INO_XATTR;
570 break;
571 default:
572 ASSERT(0);
573 return;
576 xfs_inode_mark_sick(ip, mask);
580 * Record observations of dir/attr btree corruption with the health tracking
581 * system.
583 void
584 xfs_da_mark_sick(
585 struct xfs_da_args *args)
587 xfs_dirattr_mark_sick(args->dp, args->whichfork);