// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2020-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_iwalk.h"
#include "xfs_ialloc.h"
#include "xfs_sb.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/repair.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/iscan.h"
#include "scrub/quota.h"
#include "scrub/quotacheck.h"
#include "scrub/trace.h"

/*
 * Live Quotacheck Repair
 * ======================
 *
 * Use the live quota counter information that we collected to replace the
 * counter values in the incore dquots.  A scrub->repair cycle should have
 * left the live data and hooks active, so this is safe so long as we make
 * sure the dquot is locked.
 */
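
/*
 * In outline, xrep_quotacheck() at the bottom of this file drives the
 * repair: it clears the CHKD flags for the running quota types, commits
 * new counters for each enabled type via xqcheck_commit_dqtype(), and
 * only then sets the CHKD flags again, so a crash partway through leaves
 * quotacheck pending for the next mount.
 */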

/* Commit new counters to a dquot. */
STATIC int
xqcheck_commit_dquot(
	struct xqcheck		*xqc,
	xfs_dqtype_t		dqtype,
	struct xfs_dquot	*dq)
{
	struct xqcheck_dquot	xcdq;
	struct xfarray		*counts = xqcheck_counters_for(xqc, dqtype);
	int64_t			delta;
	bool			dirty = false;
	int			error = 0;

	/* Unlock the dquot just long enough to allocate a transaction. */
	xfs_dqunlock(dq);
	error = xchk_trans_alloc(xqc->sc, 0);
	xfs_dqlock(dq);
	if (error)
		return error;

	xfs_trans_dqjoin(xqc->sc->tp, dq);

	if (xchk_iscan_aborted(&xqc->iscan)) {
		error = -ECANCELED;
		goto out_cancel;
	}

	mutex_lock(&xqc->lock);
	error = xfarray_load_sparse(counts, dq->q_id, &xcdq);
	if (error)
		goto out_unlock;

	/* Adjust counters as needed. */
	delta = (int64_t)xcdq.icount - dq->q_ino.count;
	if (delta) {
		dq->q_ino.reserved += delta;
		dq->q_ino.count += delta;
		dirty = true;
	}

	delta = (int64_t)xcdq.bcount - dq->q_blk.count;
	if (delta) {
		dq->q_blk.reserved += delta;
		dq->q_blk.count += delta;
		dirty = true;
	}

	delta = (int64_t)xcdq.rtbcount - dq->q_rtb.count;
	if (delta) {
		dq->q_rtb.reserved += delta;
		dq->q_rtb.count += delta;
		dirty = true;
	}

	xcdq.flags |= (XQCHECK_DQUOT_REPAIR_SCANNED | XQCHECK_DQUOT_WRITTEN);
	error = xfarray_store(counts, dq->q_id, &xcdq);
	if (error == -EFBIG) {
		/*
		 * EFBIG means we tried to store data at too high a byte offset
		 * in the sparse array.  IOWs, we cannot complete the repair
		 * and must cancel the whole operation.  This should never
		 * happen, but we need to catch it anyway.
		 */
		error = -ECANCELED;
	}
	mutex_unlock(&xqc->lock);
	if (error || !dirty)
		goto out_cancel;

	trace_xrep_quotacheck_dquot(xqc->sc->mp, dq->q_type, dq->q_id);

	/* Commit the dirty dquot to disk. */
	dq->q_flags |= XFS_DQFLAG_DIRTY;
	if (dq->q_id)
		xfs_qm_adjust_dqtimers(dq);
	xfs_trans_log_dquot(xqc->sc->tp, dq);

	/*
	 * Transaction commit unlocks the dquot, so we must re-lock it so that
	 * the caller can put the reference (which apparently requires a locked
	 * dquot).
	 */
	error = xrep_trans_commit(xqc->sc);
	xfs_dqlock(dq);
	return error;

out_unlock:
	mutex_unlock(&xqc->lock);
out_cancel:
	xchk_trans_cancel(xqc->sc);

	/* Re-lock the dquot so the caller can put the reference. */
	xfs_dqlock(dq);
	return error;
}
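
/*
 * Worked example for the counter adjustment in xqcheck_commit_dquot()
 * above (numbers are illustrative only): if the live scan recorded
 * xcdq.bcount == 92 while the incore dquot has q_blk.count == 100, then
 * delta == -8 and both q_blk.count and q_blk.reserved are reduced by 8,
 * which corrects the count while leaving any outstanding reservation
 * delta carried in q_blk.reserved intact.
 */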

/* Commit new quota counters for a particular quota type. */
STATIC int
xqcheck_commit_dqtype(
	struct xqcheck		*xqc,
	xfs_dqtype_t		dqtype)
{
	struct xchk_dqiter	cursor = { };
	struct xqcheck_dquot	xcdq;
	struct xfs_scrub	*sc = xqc->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfarray		*counts = xqcheck_counters_for(xqc, dqtype);
	struct xfs_dquot	*dq;
	xfarray_idx_t		cur = XFARRAY_CURSOR_INIT;
	int			error;

	/*
	 * Update the counters of every dquot that the quota file knows about.
	 */
	xchk_dqiter_init(&cursor, sc, dqtype);
	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
		error = xqcheck_commit_dquot(xqc, dqtype, dq);
		xfs_qm_dqput(dq);
		if (error)
			break;
	}
	if (error)
		return error;

	/*
	 * Make a second pass to deal with the dquots that we know about but
	 * the quota file previously did not know about.
	 */
	mutex_lock(&xqc->lock);
	while ((error = xfarray_iter(counts, &cur, &xcdq)) == 1) {
		xfs_dqid_t	id = cur - 1;

		if (xcdq.flags & XQCHECK_DQUOT_REPAIR_SCANNED)
			continue;

		mutex_unlock(&xqc->lock);

		/*
		 * Grab the dquot, allowing for dquot block allocation in a
		 * separate transaction.  We committed the scrub transaction
		 * in a previous step, so we will not be creating nested
		 * transactions here.
		 */
		error = xfs_qm_dqget(mp, id, dqtype, true, &dq);
		if (error)
			return error;

		error = xqcheck_commit_dquot(xqc, dqtype, dq);
		xfs_qm_dqput(dq);
		if (error)
			return error;

		mutex_lock(&xqc->lock);
	}
	mutex_unlock(&xqc->lock);

	return error;
}
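
/*
 * Note on the two passes above: xqcheck_commit_dquot() tags every shadow
 * record it updates with XQCHECK_DQUOT_REPAIR_SCANNED, which is what lets
 * the second pass skip ids that the first pass already handled.  The
 * "cur - 1" conversion assumes xfarray_iter() leaves the cursor pointing
 * just past the record it loaded.
 */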

/* Figure out quota CHKD flags for the running quota types. */
static inline unsigned int
xqcheck_chkd_flags(
	struct xfs_mount	*mp)
{
	unsigned int		ret = 0;

	if (XFS_IS_UQUOTA_ON(mp))
		ret |= XFS_UQUOTA_CHKD;
	if (XFS_IS_GQUOTA_ON(mp))
		ret |= XFS_GQUOTA_CHKD;
	if (XFS_IS_PQUOTA_ON(mp))
		ret |= XFS_PQUOTA_CHKD;
	return ret;
}
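
/*
 * For example, on a filesystem with user and project quota accounting
 * turned on but group quotas off, xqcheck_chkd_flags() returns
 * XFS_UQUOTA_CHKD | XFS_PQUOTA_CHKD.
 */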

/* Commit the new dquot counters. */
int
xrep_quotacheck(
	struct xfs_scrub	*sc)
{
	struct xqcheck		*xqc = sc->buf;
	unsigned int		qflags = xqcheck_chkd_flags(sc->mp);
	int			error;

	/*
	 * Clear the CHKD flag for the running quota types and commit the scrub
	 * transaction so that we can allocate new quota block mappings if we
	 * have to.  If we crash after this point, the sb still has the CHKD
	 * flags cleared, so mount quotacheck will fix all of this up.
	 */
	xrep_update_qflags(sc, qflags, 0);
	error = xrep_trans_commit(sc);
	if (error)
		return error;

	/* Commit the new counters to the dquots. */
	if (xqc->ucounts) {
		error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_USER);
		if (error)
			return error;
	}
	if (xqc->gcounts) {
		error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_GROUP);
		if (error)
			return error;
	}
	if (xqc->pcounts) {
		error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_PROJ);
		if (error)
			return error;
	}

	/* Set the CHKD flags now that we've fixed quota counts. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		return error;

	xrep_update_qflags(sc, 0, qflags);
	return xrep_trans_commit(sc);
}
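
/*
 * Caller-context sketch (assuming the usual scrub/repair wiring, which is
 * not spelled out in this file): the quotacheck scrubber leaves its
 * xqcheck state in sc->buf with the live-update hooks still armed, and
 * the repair framework calls xrep_quotacheck() while the scrub
 * transaction is still held, which is what the first xrep_trans_commit()
 * above commits.
 */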