/* fs/xfs/scrub/quota_repair.c */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <djwong@kernel.org>
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_defer.h"
13 #include "xfs_btree.h"
14 #include "xfs_bit.h"
15 #include "xfs_log_format.h"
16 #include "xfs_trans.h"
17 #include "xfs_sb.h"
18 #include "xfs_inode.h"
19 #include "xfs_inode_fork.h"
20 #include "xfs_alloc.h"
21 #include "xfs_bmap.h"
22 #include "xfs_quota.h"
23 #include "xfs_qm.h"
24 #include "xfs_dquot.h"
25 #include "xfs_dquot_item.h"
26 #include "xfs_reflink.h"
27 #include "xfs_bmap_btree.h"
28 #include "xfs_trans_space.h"
29 #include "scrub/xfs_scrub.h"
30 #include "scrub/scrub.h"
31 #include "scrub/common.h"
32 #include "scrub/quota.h"
33 #include "scrub/trace.h"
34 #include "scrub/repair.h"
/*
 * Quota Repair
 * ============
 *
 * Quota repairs are fairly simplistic; we fix everything that the dquot
 * verifiers complain about, cap any counters or limits that make no sense,
 * and schedule a quotacheck if we had to fix anything.  We also repair any
 * data fork extent records that don't apply to metadata files.
 */
46 struct xrep_quota_info {
47 struct xfs_scrub *sc;
48 bool need_quotacheck;
52 * Allocate a new block into a sparse hole in the quota file backing this
53 * dquot, initialize the block, and commit the whole mess.
55 STATIC int
56 xrep_quota_item_fill_bmap_hole(
57 struct xfs_scrub *sc,
58 struct xfs_dquot *dq,
59 struct xfs_bmbt_irec *irec)
61 struct xfs_buf *bp;
62 struct xfs_mount *mp = sc->mp;
63 int nmaps = 1;
64 int error;
66 xfs_trans_ijoin(sc->tp, sc->ip, 0);
68 /* Map a block into the file. */
69 error = xfs_trans_reserve_more(sc->tp, XFS_QM_DQALLOC_SPACE_RES(mp),
70 0);
71 if (error)
72 return error;
74 error = xfs_bmapi_write(sc->tp, sc->ip, dq->q_fileoffset,
75 XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0,
76 irec, &nmaps);
77 if (error)
78 return error;
80 dq->q_blkno = XFS_FSB_TO_DADDR(mp, irec->br_startblock);
82 trace_xrep_dquot_item_fill_bmap_hole(sc->mp, dq->q_type, dq->q_id);
84 /* Initialize the new block. */
85 error = xfs_trans_get_buf(sc->tp, mp->m_ddev_targp, dq->q_blkno,
86 mp->m_quotainfo->qi_dqchunklen, 0, &bp);
87 if (error)
88 return error;
89 bp->b_ops = &xfs_dquot_buf_ops;
91 xfs_qm_init_dquot_blk(sc->tp, dq->q_id, dq->q_type, bp);
92 xfs_buf_set_ref(bp, XFS_DQUOT_REF);
95 * Finish the mapping transactions and roll one more time to
96 * disconnect sc->ip from sc->tp.
98 error = xrep_defer_finish(sc);
99 if (error)
100 return error;
101 return xfs_trans_roll(&sc->tp);
104 /* Make sure there's a written block backing this dquot */
105 STATIC int
106 xrep_quota_item_bmap(
107 struct xfs_scrub *sc,
108 struct xfs_dquot *dq,
109 bool *dirty)
111 struct xfs_bmbt_irec irec;
112 struct xfs_mount *mp = sc->mp;
113 struct xfs_quotainfo *qi = mp->m_quotainfo;
114 xfs_fileoff_t offset = dq->q_id / qi->qi_dqperchunk;
115 int nmaps = 1;
116 int error;
118 /* The computed file offset should always be valid. */
119 if (!xfs_verify_fileoff(mp, offset)) {
120 ASSERT(xfs_verify_fileoff(mp, offset));
121 return -EFSCORRUPTED;
123 dq->q_fileoffset = offset;
125 error = xfs_bmapi_read(sc->ip, offset, 1, &irec, &nmaps, 0);
126 if (error)
127 return error;
129 if (nmaps < 1 || !xfs_bmap_is_real_extent(&irec)) {
130 /* Hole/delalloc extent; allocate a real block. */
131 error = xrep_quota_item_fill_bmap_hole(sc, dq, &irec);
132 if (error)
133 return error;
134 } else if (irec.br_state != XFS_EXT_NORM) {
135 /* Unwritten extent, which we already took care of? */
136 ASSERT(irec.br_state == XFS_EXT_NORM);
137 return -EFSCORRUPTED;
138 } else if (dq->q_blkno != XFS_FSB_TO_DADDR(mp, irec.br_startblock)) {
140 * If the cached daddr is incorrect, repair probably punched a
141 * hole out of the quota file and filled it back in with a new
142 * block. Update the block mapping in the dquot.
144 dq->q_blkno = XFS_FSB_TO_DADDR(mp, irec.br_startblock);
147 *dirty = true;
148 return 0;
151 /* Reset quota timers if incorrectly set. */
152 static inline void
153 xrep_quota_item_timer(
154 struct xfs_scrub *sc,
155 const struct xfs_dquot_res *res,
156 bool *dirty)
158 if ((res->softlimit && res->count > res->softlimit) ||
159 (res->hardlimit && res->count > res->hardlimit)) {
160 if (!res->timer)
161 *dirty = true;
162 } else {
163 if (res->timer)
164 *dirty = true;
168 /* Scrub the fields in an individual quota item. */
169 STATIC int
170 xrep_quota_item(
171 struct xrep_quota_info *rqi,
172 struct xfs_dquot *dq)
174 struct xfs_scrub *sc = rqi->sc;
175 struct xfs_mount *mp = sc->mp;
176 xfs_ino_t fs_icount;
177 bool dirty = false;
178 int error = 0;
180 /* Last chance to abort before we start committing fixes. */
181 if (xchk_should_terminate(sc, &error))
182 return error;
185 * We might need to fix holes in the bmap record for the storage
186 * backing this dquot, so we need to lock the dquot and the quota file.
187 * dqiterate gave us a locked dquot, so drop the dquot lock to get the
188 * ILOCK_EXCL.
190 xfs_dqunlock(dq);
191 xchk_ilock(sc, XFS_ILOCK_EXCL);
192 xfs_dqlock(dq);
194 error = xrep_quota_item_bmap(sc, dq, &dirty);
195 xchk_iunlock(sc, XFS_ILOCK_EXCL);
196 if (error)
197 return error;
199 /* Check the limits. */
200 if (dq->q_blk.softlimit > dq->q_blk.hardlimit) {
201 dq->q_blk.softlimit = dq->q_blk.hardlimit;
202 dirty = true;
205 if (dq->q_ino.softlimit > dq->q_ino.hardlimit) {
206 dq->q_ino.softlimit = dq->q_ino.hardlimit;
207 dirty = true;
210 if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit) {
211 dq->q_rtb.softlimit = dq->q_rtb.hardlimit;
212 dirty = true;
216 * Check that usage doesn't exceed physical limits. However, on
217 * a reflink filesystem we're allowed to exceed physical space
218 * if there are no quota limits. We don't know what the real number
219 * is, but we can make quotacheck find out for us.
221 if (!xfs_has_reflink(mp) && dq->q_blk.count > mp->m_sb.sb_dblocks) {
222 dq->q_blk.reserved -= dq->q_blk.count;
223 dq->q_blk.reserved += mp->m_sb.sb_dblocks;
224 dq->q_blk.count = mp->m_sb.sb_dblocks;
225 rqi->need_quotacheck = true;
226 dirty = true;
228 fs_icount = percpu_counter_sum(&mp->m_icount);
229 if (dq->q_ino.count > fs_icount) {
230 dq->q_ino.reserved -= dq->q_ino.count;
231 dq->q_ino.reserved += fs_icount;
232 dq->q_ino.count = fs_icount;
233 rqi->need_quotacheck = true;
234 dirty = true;
236 if (dq->q_rtb.count > mp->m_sb.sb_rblocks) {
237 dq->q_rtb.reserved -= dq->q_rtb.count;
238 dq->q_rtb.reserved += mp->m_sb.sb_rblocks;
239 dq->q_rtb.count = mp->m_sb.sb_rblocks;
240 rqi->need_quotacheck = true;
241 dirty = true;
244 xrep_quota_item_timer(sc, &dq->q_blk, &dirty);
245 xrep_quota_item_timer(sc, &dq->q_ino, &dirty);
246 xrep_quota_item_timer(sc, &dq->q_rtb, &dirty);
248 if (!dirty)
249 return 0;
251 trace_xrep_dquot_item(sc->mp, dq->q_type, dq->q_id);
253 dq->q_flags |= XFS_DQFLAG_DIRTY;
254 xfs_trans_dqjoin(sc->tp, dq);
255 if (dq->q_id) {
256 xfs_qm_adjust_dqlimits(dq);
257 xfs_qm_adjust_dqtimers(dq);
259 xfs_trans_log_dquot(sc->tp, dq);
260 error = xfs_trans_roll(&sc->tp);
261 xfs_dqlock(dq);
262 return error;
265 /* Fix a quota timer so that we can pass the verifier. */
266 STATIC void
267 xrep_quota_fix_timer(
268 struct xfs_mount *mp,
269 const struct xfs_disk_dquot *ddq,
270 __be64 softlimit,
271 __be64 countnow,
272 __be32 *timer,
273 time64_t timelimit)
275 uint64_t soft = be64_to_cpu(softlimit);
276 uint64_t count = be64_to_cpu(countnow);
277 time64_t new_timer;
278 uint32_t t;
280 if (!soft || count <= soft || *timer != 0)
281 return;
283 new_timer = xfs_dquot_set_timeout(mp,
284 ktime_get_real_seconds() + timelimit);
285 if (ddq->d_type & XFS_DQTYPE_BIGTIME)
286 t = xfs_dq_unix_to_bigtime(new_timer);
287 else
288 t = new_timer;
290 *timer = cpu_to_be32(t);
293 /* Fix anything the verifiers complain about. */
294 STATIC int
295 xrep_quota_block(
296 struct xfs_scrub *sc,
297 xfs_daddr_t daddr,
298 xfs_dqtype_t dqtype,
299 xfs_dqid_t id)
301 struct xfs_dqblk *dqblk;
302 struct xfs_disk_dquot *ddq;
303 struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
304 struct xfs_def_quota *defq = xfs_get_defquota(qi, dqtype);
305 struct xfs_buf *bp = NULL;
306 enum xfs_blft buftype = 0;
307 int i;
308 int error;
310 error = xfs_trans_read_buf(sc->mp, sc->tp, sc->mp->m_ddev_targp, daddr,
311 qi->qi_dqchunklen, 0, &bp, &xfs_dquot_buf_ops);
312 switch (error) {
313 case -EFSBADCRC:
314 case -EFSCORRUPTED:
315 /* Failed verifier, retry read with no ops. */
316 error = xfs_trans_read_buf(sc->mp, sc->tp,
317 sc->mp->m_ddev_targp, daddr, qi->qi_dqchunklen,
318 0, &bp, NULL);
319 if (error)
320 return error;
321 break;
322 case 0:
323 dqblk = bp->b_addr;
324 ddq = &dqblk[0].dd_diskdq;
327 * If there's nothing that would impede a dqiterate, we're
328 * done.
330 if ((ddq->d_type & XFS_DQTYPE_REC_MASK) != dqtype ||
331 id == be32_to_cpu(ddq->d_id)) {
332 xfs_trans_brelse(sc->tp, bp);
333 return 0;
335 break;
336 default:
337 return error;
340 /* Something's wrong with the block, fix the whole thing. */
341 dqblk = bp->b_addr;
342 bp->b_ops = &xfs_dquot_buf_ops;
343 for (i = 0; i < qi->qi_dqperchunk; i++, dqblk++) {
344 ddq = &dqblk->dd_diskdq;
346 trace_xrep_disk_dquot(sc->mp, dqtype, id + i);
348 ddq->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
349 ddq->d_version = XFS_DQUOT_VERSION;
350 ddq->d_type = dqtype;
351 ddq->d_id = cpu_to_be32(id + i);
353 if (xfs_has_bigtime(sc->mp) && ddq->d_id)
354 ddq->d_type |= XFS_DQTYPE_BIGTIME;
356 xrep_quota_fix_timer(sc->mp, ddq, ddq->d_blk_softlimit,
357 ddq->d_bcount, &ddq->d_btimer,
358 defq->blk.time);
360 xrep_quota_fix_timer(sc->mp, ddq, ddq->d_ino_softlimit,
361 ddq->d_icount, &ddq->d_itimer,
362 defq->ino.time);
364 xrep_quota_fix_timer(sc->mp, ddq, ddq->d_rtb_softlimit,
365 ddq->d_rtbcount, &ddq->d_rtbtimer,
366 defq->rtb.time);
368 /* We only support v5 filesystems so always set these. */
369 uuid_copy(&dqblk->dd_uuid, &sc->mp->m_sb.sb_meta_uuid);
370 xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
371 XFS_DQUOT_CRC_OFF);
372 dqblk->dd_lsn = 0;
374 switch (dqtype) {
375 case XFS_DQTYPE_USER:
376 buftype = XFS_BLFT_UDQUOT_BUF;
377 break;
378 case XFS_DQTYPE_GROUP:
379 buftype = XFS_BLFT_GDQUOT_BUF;
380 break;
381 case XFS_DQTYPE_PROJ:
382 buftype = XFS_BLFT_PDQUOT_BUF;
383 break;
385 xfs_trans_buf_set_type(sc->tp, bp, buftype);
386 xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
387 return xrep_roll_trans(sc);
391 * Repair a quota file's data fork. The function returns with the inode
392 * joined.
394 STATIC int
395 xrep_quota_data_fork(
396 struct xfs_scrub *sc,
397 xfs_dqtype_t dqtype)
399 struct xfs_bmbt_irec irec = { 0 };
400 struct xfs_iext_cursor icur;
401 struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
402 struct xfs_ifork *ifp;
403 xfs_fileoff_t max_dqid_off;
404 xfs_fileoff_t off;
405 xfs_fsblock_t fsbno;
406 bool truncate = false;
407 bool joined = false;
408 int error = 0;
410 error = xrep_metadata_inode_forks(sc);
411 if (error)
412 goto out;
414 /* Check for data fork problems that apply only to quota files. */
415 max_dqid_off = XFS_DQ_ID_MAX / qi->qi_dqperchunk;
416 ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
417 for_each_xfs_iext(ifp, &icur, &irec) {
418 if (isnullstartblock(irec.br_startblock)) {
419 error = -EFSCORRUPTED;
420 goto out;
423 if (irec.br_startoff > max_dqid_off ||
424 irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
425 truncate = true;
426 break;
429 /* Convert unwritten extents to real ones. */
430 if (irec.br_state == XFS_EXT_UNWRITTEN) {
431 struct xfs_bmbt_irec nrec;
432 int nmap = 1;
434 if (!joined) {
435 xfs_trans_ijoin(sc->tp, sc->ip, 0);
436 joined = true;
439 error = xfs_bmapi_write(sc->tp, sc->ip,
440 irec.br_startoff, irec.br_blockcount,
441 XFS_BMAPI_CONVERT, 0, &nrec, &nmap);
442 if (error)
443 goto out;
444 ASSERT(nrec.br_startoff == irec.br_startoff);
445 ASSERT(nrec.br_blockcount == irec.br_blockcount);
447 error = xfs_defer_finish(&sc->tp);
448 if (error)
449 goto out;
453 if (!joined) {
454 xfs_trans_ijoin(sc->tp, sc->ip, 0);
455 joined = true;
458 if (truncate) {
459 /* Erase everything after the block containing the max dquot */
460 error = xfs_bunmapi_range(&sc->tp, sc->ip, 0,
461 max_dqid_off * sc->mp->m_sb.sb_blocksize,
462 XFS_MAX_FILEOFF);
463 if (error)
464 goto out;
466 /* Remove all CoW reservations. */
467 error = xfs_reflink_cancel_cow_blocks(sc->ip, &sc->tp, 0,
468 XFS_MAX_FILEOFF, true);
469 if (error)
470 goto out;
471 sc->ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
474 * Always re-log the inode so that our permanent transaction
475 * can keep on rolling it forward in the log.
477 xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);
480 /* Now go fix anything that fails the verifiers. */
481 for_each_xfs_iext(ifp, &icur, &irec) {
482 for (fsbno = irec.br_startblock, off = irec.br_startoff;
483 fsbno < irec.br_startblock + irec.br_blockcount;
484 fsbno += XFS_DQUOT_CLUSTER_SIZE_FSB,
485 off += XFS_DQUOT_CLUSTER_SIZE_FSB) {
486 error = xrep_quota_block(sc,
487 XFS_FSB_TO_DADDR(sc->mp, fsbno),
488 dqtype, off * qi->qi_dqperchunk);
489 if (error)
490 goto out;
494 out:
495 return error;
499 * Go fix anything in the quota items that we could have been mad about. Now
500 * that we've checked the quota inode data fork we have to drop ILOCK_EXCL to
501 * use the regular dquot functions.
503 STATIC int
504 xrep_quota_problems(
505 struct xfs_scrub *sc,
506 xfs_dqtype_t dqtype)
508 struct xchk_dqiter cursor = { };
509 struct xrep_quota_info rqi = { .sc = sc };
510 struct xfs_dquot *dq;
511 int error;
513 xchk_dqiter_init(&cursor, sc, dqtype);
514 while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
515 error = xrep_quota_item(&rqi, dq);
516 xfs_qm_dqput(dq);
517 if (error)
518 break;
520 if (error)
521 return error;
523 /* Make a quotacheck happen. */
524 if (rqi.need_quotacheck)
525 xrep_force_quotacheck(sc, dqtype);
526 return 0;
529 /* Repair all of a quota type's items. */
531 xrep_quota(
532 struct xfs_scrub *sc)
534 xfs_dqtype_t dqtype;
535 int error;
537 dqtype = xchk_quota_to_dqtype(sc);
540 * Re-take the ILOCK so that we can fix any problems that we found
541 * with the data fork mappings, or with the dquot bufs themselves.
543 if (!(sc->ilock_flags & XFS_ILOCK_EXCL))
544 xchk_ilock(sc, XFS_ILOCK_EXCL);
545 error = xrep_quota_data_fork(sc, dqtype);
546 if (error)
547 return error;
550 * Finish deferred items and roll the transaction to unjoin the quota
551 * inode from transaction so that we can unlock the quota inode; we
552 * play only with dquots from now on.
554 error = xrep_defer_finish(sc);
555 if (error)
556 return error;
557 error = xfs_trans_roll(&sc->tp);
558 if (error)
559 return error;
560 xchk_iunlock(sc, sc->ilock_flags);
562 /* Fix anything the dquot verifiers don't complain about. */
563 error = xrep_quota_problems(sc, dqtype);
564 if (error)
565 return error;
567 return xrep_trans_commit(sc);