// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2020-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans.h"
14 #include "xfs_inode.h"
15 #include "xfs_quota.h"
17 #include "xfs_icache.h"
18 #include "xfs_bmap_util.h"
19 #include "xfs_ialloc.h"
21 #include "scrub/scrub.h"
22 #include "scrub/common.h"
23 #include "scrub/repair.h"
24 #include "scrub/xfile.h"
25 #include "scrub/xfarray.h"
26 #include "scrub/iscan.h"
27 #include "scrub/quota.h"
28 #include "scrub/quotacheck.h"
29 #include "scrub/trace.h"
/*
 * Live Quotacheck
 * ===============
 *
 * Quota counters are "summary" metadata, in the sense that they are computed
 * as the summation of the block usage counts for every file on the filesystem.
 * Therefore, we compute the correct icount, bcount, and rtbcount values by
 * creating a shadow quota counter structure and walking every inode.
 */
41 /* Track the quota deltas for a dquot in a transaction. */
42 struct xqcheck_dqtrx
{
49 int64_t delbcnt_delta
;
51 int64_t rtbcount_delta
;
55 #define XQCHECK_MAX_NR_DQTRXS (XFS_QM_TRANS_DQTYPES * XFS_QM_TRANS_MAXDQS)
58 * Track the quota deltas for all dquots attached to a transaction if the
59 * quota deltas are being applied to an inode that we already scanned.
61 struct xqcheck_dqacct
{
62 struct rhash_head hash
;
64 struct xqcheck_dqtrx dqtrx
[XQCHECK_MAX_NR_DQTRXS
];
65 unsigned int refcount
;
/* Free a shadow dquot accounting structure. */
static void
xqcheck_dqacct_free(
	void			*ptr,
	void			*arg)
{
	struct xqcheck_dqacct	*dqa = ptr;

	kfree(dqa);
}
79 /* Set us up to scrub quota counters. */
81 xchk_setup_quotacheck(
84 if (!XFS_IS_QUOTA_ON(sc
->mp
))
87 xchk_fsgates_enable(sc
, XCHK_FSGATES_QUOTA
);
89 sc
->buf
= kzalloc(sizeof(struct xqcheck
), XCHK_GFP_FLAGS
);
93 return xchk_setup_fs(sc
);
/*
 * Part 1: Collecting dquot resource usage counts.  For each xfs_dquot attached
 * to each inode, we create a shadow dquot, and compute the inode count and add
 * the data/rt block usage from what we see.
 *
 * To avoid false corruption reports in part 2, any failure in this part must
 * set the INCOMPLETE flag even when a negative errno is returned.  This care
 * must be taken with certain errno values (i.e. EFSBADCRC, EFSCORRUPTED,
 * ECANCELED) that are absorbed into a scrub state flag update by
 * xchk_*_process_error.  Scrub and repair share the same incore data
 * structures, so the INCOMPLETE flag is critical to prevent a repair based on
 * insufficient information.
 *
 * Because we are scanning a live filesystem, it's possible that another thread
 * will try to update the quota counters for an inode that we've already
 * scanned.  This will cause our counts to be incorrect.  Therefore, we hook
 * the live transaction code in two places: (1) when the callers update the
 * per-transaction dqtrx structure to log quota counter updates; and (2) when
 * transaction commit actually logs those updates to the incore dquot.  By
 * shadowing transaction updates in this manner, live quotacheck can ensure
 * by locking the dquot and the shadow structure that its own copies are not
 * out of date.  Because the hook code runs in a different process context from
 * the scrub code and the scrub state flags are not accessed atomically,
 * failures in the hook code must abort the iscan and the scrubber must notice
 * the aborted scan and set the incomplete flag.
 *
 * Note that we use srcu notifier hooks to minimize the overhead when live
 * quotacheck is /not/ running.
 */
126 /* Update an incore dquot counter information from a live update. */
128 xqcheck_update_incore_counts(
130 struct xfarray
*counts
,
136 struct xqcheck_dquot xcdq
;
139 error
= xfarray_load_sparse(counts
, id
, &xcdq
);
143 xcdq
.flags
|= XQCHECK_DQUOT_WRITTEN
;
144 xcdq
.icount
+= inodes
;
145 xcdq
.bcount
+= nblks
;
146 xcdq
.rtbcount
+= rtblks
;
148 error
= xfarray_store(counts
, id
, &xcdq
);
149 if (error
== -EFBIG
) {
151 * EFBIG means we tried to store data at too high a byte offset
152 * in the sparse array. IOWs, we cannot complete the check and
153 * must notify userspace that the check was incomplete.
160 /* Decide if this is the shadow dquot accounting structure for a transaction. */
162 xqcheck_dqacct_obj_cmpfn(
163 struct rhashtable_compare_arg
*arg
,
166 const uintptr_t *tx_idp
= arg
->key
;
167 const struct xqcheck_dqacct
*dqa
= obj
;
169 if (dqa
->tx_id
!= *tx_idp
)
174 static const struct rhashtable_params xqcheck_dqacct_hash_params
= {
176 .key_len
= sizeof(uintptr_t),
177 .key_offset
= offsetof(struct xqcheck_dqacct
, tx_id
),
178 .head_offset
= offsetof(struct xqcheck_dqacct
, hash
),
179 .automatic_shrinking
= true,
180 .obj_cmpfn
= xqcheck_dqacct_obj_cmpfn
,
183 /* Find a shadow dqtrx slot for the given dquot. */
184 STATIC
struct xqcheck_dqtrx
*
186 struct xqcheck_dqacct
*dqa
,
192 for (i
= 0; i
< XQCHECK_MAX_NR_DQTRXS
; i
++) {
193 if (dqa
->dqtrx
[i
].q_type
== 0 ||
194 (dqa
->dqtrx
[i
].q_type
== q_type
&&
195 dqa
->dqtrx
[i
].q_id
== q_id
))
196 return &dqa
->dqtrx
[i
];
203 * Create and fill out a quota delta tracking structure to shadow the updates
204 * going on in the regular quota code.
207 xqcheck_mod_live_ino_dqtrx(
208 struct notifier_block
*nb
,
209 unsigned long action
,
212 struct xfs_mod_ino_dqtrx_params
*p
= data
;
214 struct xqcheck_dqacct
*dqa
;
215 struct xqcheck_dqtrx
*dqtrx
;
218 xqc
= container_of(nb
, struct xqcheck
, qhook
.mod_hook
.nb
);
220 /* Skip quota reservation fields. */
222 case XFS_TRANS_DQ_BCOUNT
:
223 case XFS_TRANS_DQ_DELBCOUNT
:
224 case XFS_TRANS_DQ_ICOUNT
:
225 case XFS_TRANS_DQ_RTBCOUNT
:
226 case XFS_TRANS_DQ_DELRTBCOUNT
:
232 /* Ignore dqtrx updates for quota types we don't care about. */
234 case XFS_DQTYPE_USER
:
238 case XFS_DQTYPE_GROUP
:
242 case XFS_DQTYPE_PROJ
:
250 /* Skip inodes that haven't been scanned yet. */
251 if (!xchk_iscan_want_live_update(&xqc
->iscan
, p
->ino
))
254 /* Make a shadow quota accounting tracker for this transaction. */
255 mutex_lock(&xqc
->lock
);
256 dqa
= rhashtable_lookup_fast(&xqc
->shadow_dquot_acct
, &p
->tx_id
,
257 xqcheck_dqacct_hash_params
);
259 dqa
= kzalloc(sizeof(struct xqcheck_dqacct
), XCHK_GFP_FLAGS
);
263 dqa
->tx_id
= p
->tx_id
;
264 error
= rhashtable_insert_fast(&xqc
->shadow_dquot_acct
,
265 &dqa
->hash
, xqcheck_dqacct_hash_params
);
270 /* Find the shadow dqtrx (or an empty slot) here. */
271 dqtrx
= xqcheck_get_dqtrx(dqa
, p
->q_type
, p
->q_id
);
274 if (dqtrx
->q_type
== 0) {
275 dqtrx
->q_type
= p
->q_type
;
276 dqtrx
->q_id
= p
->q_id
;
282 case XFS_TRANS_DQ_BCOUNT
:
283 dqtrx
->bcount_delta
+= p
->delta
;
285 case XFS_TRANS_DQ_DELBCOUNT
:
286 dqtrx
->delbcnt_delta
+= p
->delta
;
288 case XFS_TRANS_DQ_ICOUNT
:
289 dqtrx
->icount_delta
+= p
->delta
;
291 case XFS_TRANS_DQ_RTBCOUNT
:
292 dqtrx
->rtbcount_delta
+= p
->delta
;
294 case XFS_TRANS_DQ_DELRTBCOUNT
:
295 dqtrx
->delrtb_delta
+= p
->delta
;
299 mutex_unlock(&xqc
->lock
);
303 xchk_iscan_abort(&xqc
->iscan
);
304 mutex_unlock(&xqc
->lock
);
309 * Apply the transaction quota deltas to our shadow quota accounting info when
310 * the regular quota code are doing the same.
313 xqcheck_apply_live_dqtrx(
314 struct notifier_block
*nb
,
315 unsigned long action
,
318 struct xfs_apply_dqtrx_params
*p
= data
;
320 struct xqcheck_dqacct
*dqa
;
321 struct xqcheck_dqtrx
*dqtrx
;
322 struct xfarray
*counts
;
325 xqc
= container_of(nb
, struct xqcheck
, qhook
.apply_hook
.nb
);
327 /* Map the dquot type to an incore counter object. */
329 case XFS_DQTYPE_USER
:
330 counts
= xqc
->ucounts
;
332 case XFS_DQTYPE_GROUP
:
333 counts
= xqc
->gcounts
;
335 case XFS_DQTYPE_PROJ
:
336 counts
= xqc
->pcounts
;
342 if (xchk_iscan_aborted(&xqc
->iscan
) || counts
== NULL
)
346 * Find the shadow dqtrx for this transaction and dquot, if any deltas
347 * need to be applied here. If not, we're finished early.
349 mutex_lock(&xqc
->lock
);
350 dqa
= rhashtable_lookup_fast(&xqc
->shadow_dquot_acct
, &p
->tx_id
,
351 xqcheck_dqacct_hash_params
);
354 dqtrx
= xqcheck_get_dqtrx(dqa
, p
->q_type
, p
->q_id
);
355 if (!dqtrx
|| dqtrx
->q_type
== 0)
358 /* Update our shadow dquot if we're committing. */
359 if (action
== XFS_APPLY_DQTRX_COMMIT
) {
360 error
= xqcheck_update_incore_counts(xqc
, counts
, p
->q_id
,
362 dqtrx
->bcount_delta
+ dqtrx
->delbcnt_delta
,
363 dqtrx
->rtbcount_delta
+ dqtrx
->delrtb_delta
);
368 /* Free the shadow accounting structure if that was the last user. */
370 if (dqa
->refcount
== 0) {
371 error
= rhashtable_remove_fast(&xqc
->shadow_dquot_acct
,
372 &dqa
->hash
, xqcheck_dqacct_hash_params
);
375 xqcheck_dqacct_free(dqa
, NULL
);
378 mutex_unlock(&xqc
->lock
);
382 xchk_iscan_abort(&xqc
->iscan
);
384 mutex_unlock(&xqc
->lock
);
388 /* Record this inode's quota usage in our shadow quota counter data. */
390 xqcheck_collect_inode(
392 struct xfs_inode
*ip
)
394 struct xfs_trans
*tp
= xqc
->sc
->tp
;
395 xfs_filblks_t nblks
, rtblks
;
396 uint ilock_flags
= 0;
398 bool isreg
= S_ISREG(VFS_I(ip
)->i_mode
);
401 if (xfs_is_metadir_inode(ip
) ||
402 xfs_is_quota_inode(&tp
->t_mountp
->m_sb
, ip
->i_ino
)) {
404 * Quota files are never counted towards quota, so we do not
405 * need to take the lock. Files do not switch between the
406 * metadata and regular directory trees without a reallocation,
407 * so we do not need to ILOCK them either.
409 xchk_iscan_mark_visited(&xqc
->iscan
, ip
);
413 /* Figure out the data / rt device block counts. */
414 xfs_ilock(ip
, XFS_IOLOCK_SHARED
);
416 xfs_ilock(ip
, XFS_MMAPLOCK_SHARED
);
417 if (XFS_IS_REALTIME_INODE(ip
)) {
419 * Read in the data fork for rt files so that _count_blocks
420 * can count the number of blocks allocated from the rt volume.
421 * Inodes do not track that separately.
423 ilock_flags
= xfs_ilock_data_map_shared(ip
);
424 error
= xfs_iread_extents(tp
, ip
, XFS_DATA_FORK
);
428 ilock_flags
= XFS_ILOCK_SHARED
;
429 xfs_ilock(ip
, XFS_ILOCK_SHARED
);
431 xfs_inode_count_blocks(tp
, ip
, &nblks
, &rtblks
);
433 if (xchk_iscan_aborted(&xqc
->iscan
)) {
438 /* Update the shadow dquot counters. */
439 mutex_lock(&xqc
->lock
);
441 id
= xfs_qm_id_for_quotatype(ip
, XFS_DQTYPE_USER
);
442 error
= xqcheck_update_incore_counts(xqc
, xqc
->ucounts
, id
, 1,
449 id
= xfs_qm_id_for_quotatype(ip
, XFS_DQTYPE_GROUP
);
450 error
= xqcheck_update_incore_counts(xqc
, xqc
->gcounts
, id
, 1,
457 id
= xfs_qm_id_for_quotatype(ip
, XFS_DQTYPE_PROJ
);
458 error
= xqcheck_update_incore_counts(xqc
, xqc
->pcounts
, id
, 1,
463 mutex_unlock(&xqc
->lock
);
465 xchk_iscan_mark_visited(&xqc
->iscan
, ip
);
469 mutex_unlock(&xqc
->lock
);
471 xchk_iscan_abort(&xqc
->iscan
);
473 xchk_set_incomplete(xqc
->sc
);
475 xfs_iunlock(ip
, ilock_flags
);
477 xfs_iunlock(ip
, XFS_MMAPLOCK_SHARED
);
478 xfs_iunlock(ip
, XFS_IOLOCK_SHARED
);
482 /* Walk all the allocated inodes and run a quota scan on them. */
484 xqcheck_collect_counts(
487 struct xfs_scrub
*sc
= xqc
->sc
;
488 struct xfs_inode
*ip
;
492 * Set up for a potentially lengthy filesystem scan by reducing our
493 * transaction resource usage for the duration. Specifically:
495 * Cancel the transaction to release the log grant space while we scan
498 * Create a new empty transaction to eliminate the possibility of the
499 * inode scan deadlocking on cyclical metadata.
501 * We pass the empty transaction to the file scanning function to avoid
502 * repeatedly cycling empty transactions. This can be done without
503 * risk of deadlock between sb_internal and the IOLOCK (we take the
504 * IOLOCK to quiesce the file before scanning) because empty
505 * transactions do not take sb_internal.
507 xchk_trans_cancel(sc
);
508 error
= xchk_trans_alloc_empty(sc
);
512 while ((error
= xchk_iscan_iter(&xqc
->iscan
, &ip
)) == 1) {
513 error
= xqcheck_collect_inode(xqc
, ip
);
518 if (xchk_should_terminate(sc
, &error
))
521 xchk_iscan_iter_finish(&xqc
->iscan
);
523 xchk_set_incomplete(sc
);
525 * If we couldn't grab an inode that was busy with a state
526 * change, change the error code so that we exit to userspace
527 * as quickly as possible.
535 * Switch out for a real transaction in preparation for building a new
538 xchk_trans_cancel(sc
);
539 return xchk_setup_fs(sc
);
543 * Part 2: Comparing dquot resource counters. Walk each xfs_dquot, comparing
544 * the resource usage counters against our shadow dquots; and then walk each
545 * shadow dquot (that wasn't covered in the first part), comparing it against
550 * Check the dquot data against what we observed. Caller must hold the dquot
554 xqcheck_compare_dquot(
557 struct xfs_dquot
*dq
)
559 struct xqcheck_dquot xcdq
;
560 struct xfarray
*counts
= xqcheck_counters_for(xqc
, dqtype
);
563 if (xchk_iscan_aborted(&xqc
->iscan
)) {
564 xchk_set_incomplete(xqc
->sc
);
568 mutex_lock(&xqc
->lock
);
569 error
= xfarray_load_sparse(counts
, dq
->q_id
, &xcdq
);
573 if (xcdq
.icount
!= dq
->q_ino
.count
)
574 xchk_qcheck_set_corrupt(xqc
->sc
, dqtype
, dq
->q_id
);
576 if (xcdq
.bcount
!= dq
->q_blk
.count
)
577 xchk_qcheck_set_corrupt(xqc
->sc
, dqtype
, dq
->q_id
);
579 if (xcdq
.rtbcount
!= dq
->q_rtb
.count
)
580 xchk_qcheck_set_corrupt(xqc
->sc
, dqtype
, dq
->q_id
);
582 xcdq
.flags
|= (XQCHECK_DQUOT_COMPARE_SCANNED
| XQCHECK_DQUOT_WRITTEN
);
583 error
= xfarray_store(counts
, dq
->q_id
, &xcdq
);
584 if (error
== -EFBIG
) {
586 * EFBIG means we tried to store data at too high a byte offset
587 * in the sparse array. IOWs, we cannot complete the check and
588 * must notify userspace that the check was incomplete. This
589 * should never happen outside of the collection phase.
591 xchk_set_incomplete(xqc
->sc
);
594 mutex_unlock(&xqc
->lock
);
598 if (xqc
->sc
->sm
->sm_flags
& XFS_SCRUB_OFLAG_CORRUPT
)
604 mutex_unlock(&xqc
->lock
);
609 * Walk all the observed dquots, and make sure there's a matching incore
610 * dquot and that its counts match ours.
613 xqcheck_walk_observations(
617 struct xqcheck_dquot xcdq
;
618 struct xfs_dquot
*dq
;
619 struct xfarray
*counts
= xqcheck_counters_for(xqc
, dqtype
);
620 xfarray_idx_t cur
= XFARRAY_CURSOR_INIT
;
623 mutex_lock(&xqc
->lock
);
624 while ((error
= xfarray_iter(counts
, &cur
, &xcdq
)) == 1) {
625 xfs_dqid_t id
= cur
- 1;
627 if (xcdq
.flags
& XQCHECK_DQUOT_COMPARE_SCANNED
)
630 mutex_unlock(&xqc
->lock
);
632 error
= xfs_qm_dqget(xqc
->sc
->mp
, id
, dqtype
, false, &dq
);
633 if (error
== -ENOENT
) {
634 xchk_qcheck_set_corrupt(xqc
->sc
, dqtype
, id
);
640 error
= xqcheck_compare_dquot(xqc
, dqtype
, dq
);
645 if (xchk_should_terminate(xqc
->sc
, &error
))
648 mutex_lock(&xqc
->lock
);
650 mutex_unlock(&xqc
->lock
);
655 /* Compare the quota counters we observed against the live dquots. */
657 xqcheck_compare_dqtype(
661 struct xchk_dqiter cursor
= { };
662 struct xfs_scrub
*sc
= xqc
->sc
;
663 struct xfs_dquot
*dq
;
666 if (sc
->sm
->sm_flags
& XFS_SCRUB_OFLAG_CORRUPT
)
669 /* If the quota CHKD flag is cleared, we need to repair this quota. */
670 if (!(xfs_quota_chkd_flag(dqtype
) & sc
->mp
->m_qflags
)) {
671 xchk_qcheck_set_corrupt(xqc
->sc
, dqtype
, 0);
675 /* Compare what we observed against the actual dquots. */
676 xchk_dqiter_init(&cursor
, sc
, dqtype
);
677 while ((error
= xchk_dquot_iter(&cursor
, &dq
)) == 1) {
678 error
= xqcheck_compare_dquot(xqc
, dqtype
, dq
);
686 /* Walk all the observed dquots and compare to the incore ones. */
687 return xqcheck_walk_observations(xqc
, dqtype
);
690 /* Tear down everything associated with a quotacheck. */
692 xqcheck_teardown_scan(
695 struct xqcheck
*xqc
= priv
;
696 struct xfs_quotainfo
*qi
= xqc
->sc
->mp
->m_quotainfo
;
698 /* Discourage any hook functions that might be running. */
699 xchk_iscan_abort(&xqc
->iscan
);
702 * As noted above, the apply hook is responsible for cleaning up the
703 * shadow dquot accounting data when a transaction completes. The mod
704 * hook must be removed before the apply hook so that we don't
705 * mistakenly leave an active shadow account for the mod hook to get
706 * its hands on. No hooks should be running after these functions
709 xfs_dqtrx_hook_del(qi
, &xqc
->qhook
);
711 if (xqc
->shadow_dquot_acct
.key_len
) {
712 rhashtable_free_and_destroy(&xqc
->shadow_dquot_acct
,
713 xqcheck_dqacct_free
, NULL
);
714 xqc
->shadow_dquot_acct
.key_len
= 0;
718 xfarray_destroy(xqc
->pcounts
);
723 xfarray_destroy(xqc
->gcounts
);
728 xfarray_destroy(xqc
->ucounts
);
732 xchk_iscan_teardown(&xqc
->iscan
);
733 mutex_destroy(&xqc
->lock
);
738 * Scan all inodes in the entire filesystem to generate quota counter data.
739 * If the scan is successful, the quota data will be left alive for a repair.
740 * If any error occurs, we'll tear everything down.
744 struct xfs_scrub
*sc
,
748 struct xfs_quotainfo
*qi
= sc
->mp
->m_quotainfo
;
749 unsigned long long max_dquots
= XFS_DQ_ID_MAX
+ 1ULL;
752 ASSERT(xqc
->sc
== NULL
);
755 mutex_init(&xqc
->lock
);
757 /* Retry iget every tenth of a second for up to 30 seconds. */
758 xchk_iscan_start(sc
, 30000, 100, &xqc
->iscan
);
761 if (xfs_this_quota_on(sc
->mp
, XFS_DQTYPE_USER
)) {
762 descr
= xchk_xfile_descr(sc
, "user dquot records");
763 error
= xfarray_create(descr
, max_dquots
,
764 sizeof(struct xqcheck_dquot
), &xqc
->ucounts
);
770 if (xfs_this_quota_on(sc
->mp
, XFS_DQTYPE_GROUP
)) {
771 descr
= xchk_xfile_descr(sc
, "group dquot records");
772 error
= xfarray_create(descr
, max_dquots
,
773 sizeof(struct xqcheck_dquot
), &xqc
->gcounts
);
779 if (xfs_this_quota_on(sc
->mp
, XFS_DQTYPE_PROJ
)) {
780 descr
= xchk_xfile_descr(sc
, "project dquot records");
781 error
= xfarray_create(descr
, max_dquots
,
782 sizeof(struct xqcheck_dquot
), &xqc
->pcounts
);
789 * Set up hash table to map transactions to our internal shadow dqtrx
792 error
= rhashtable_init(&xqc
->shadow_dquot_acct
,
793 &xqcheck_dqacct_hash_params
);
798 * Hook into the quota code. The hook only triggers for inodes that
799 * were already scanned, and the scanner thread takes each inode's
800 * ILOCK, which means that any in-progress inode updates will finish
801 * before we can scan the inode.
803 * The apply hook (which removes the shadow dquot accounting struct)
804 * must be installed before the mod hook so that we never fail to catch
805 * the end of a quota update sequence and leave stale shadow data.
807 ASSERT(sc
->flags
& XCHK_FSGATES_QUOTA
);
808 xfs_dqtrx_hook_setup(&xqc
->qhook
, xqcheck_mod_live_ino_dqtrx
,
809 xqcheck_apply_live_dqtrx
);
811 error
= xfs_dqtrx_hook_add(qi
, &xqc
->qhook
);
815 /* Use deferred cleanup to pass the quota count data to repair. */
816 sc
->buf_cleanup
= xqcheck_teardown_scan
;
820 xqcheck_teardown_scan(xqc
);
824 /* Scrub all counters for a given quota type. */
827 struct xfs_scrub
*sc
)
829 struct xqcheck
*xqc
= sc
->buf
;
832 /* Check quota counters on the live filesystem. */
833 error
= xqcheck_setup_scan(sc
, xqc
);
837 /* Walk all inodes, picking up quota information. */
838 error
= xqcheck_collect_counts(xqc
);
839 if (!xchk_xref_process_error(sc
, 0, 0, &error
))
842 /* Fail fast if we're not playing with a full dataset. */
843 if (xchk_iscan_aborted(&xqc
->iscan
))
844 xchk_set_incomplete(sc
);
845 if (sc
->sm
->sm_flags
& XFS_SCRUB_OFLAG_INCOMPLETE
)
848 /* Compare quota counters. */
850 error
= xqcheck_compare_dqtype(xqc
, XFS_DQTYPE_USER
);
851 if (!xchk_xref_process_error(sc
, 0, 0, &error
))
855 error
= xqcheck_compare_dqtype(xqc
, XFS_DQTYPE_GROUP
);
856 if (!xchk_xref_process_error(sc
, 0, 0, &error
))
860 error
= xqcheck_compare_dqtype(xqc
, XFS_DQTYPE_PROJ
);
861 if (!xchk_xref_process_error(sc
, 0, 0, &error
))
865 /* Check one last time for an incomplete dataset. */
866 if (xchk_iscan_aborted(&xqc
->iscan
))
867 xchk_set_incomplete(sc
);