fs/xfs/libxfs/xfs_dquot_buf.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"
#include "xfs_health.h"
#include "xfs_metadir.h"
#include "xfs_metafile.h"

int
xfs_calc_dquots_per_chunk(
        unsigned int            nbblks) /* basic block units */
{
        ASSERT(nbblks > 0);
        return BBTOB(nbblks) / sizeof(struct xfs_dqblk);
}
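
/*
 * Worked example for xfs_calc_dquots_per_chunk(), assuming the usual on-disk
 * layout: BBTOB() converts 512-byte basic blocks to bytes and
 * sizeof(struct xfs_dqblk) is 136 bytes, so a 4096-byte filesystem block
 * (nbblks == 8) holds 4096 / 136 = 30 dquot records per chunk.
 */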

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */

xfs_failaddr_t
xfs_dquot_verify(
        struct xfs_mount        *mp,
        struct xfs_disk_dquot   *ddq,
        xfs_dqid_t              id)     /* used only during quotacheck */
{
        __u8                    ddq_type;

        /*
         * We can encounter an uninitialized dquot buffer for 2 reasons:
         * 1. If we crash while deleting the quotainode(s), and those blks got
         *    used for user data. This is because we take the path of regular
         *    file deletion; however, the size field of quotainodes is never
         *    updated, so all the tricks that we play in itruncate_finish
         *    don't quite matter.
         *
         * 2. We don't play the quota buffers when there's a quotaoff logitem.
         *    But the allocation will be replayed so we'll end up with an
         *    uninitialized quota block.
         *
         * This is all fine; things are still consistent, and we haven't lost
         * any quota information. Just don't complain about bad dquot blks.
         */
        if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
                return __this_address;
        if (ddq->d_version != XFS_DQUOT_VERSION)
                return __this_address;

        if (ddq->d_type & ~XFS_DQTYPE_ANY)
                return __this_address;
        ddq_type = ddq->d_type & XFS_DQTYPE_REC_MASK;
        if (ddq_type != XFS_DQTYPE_USER &&
            ddq_type != XFS_DQTYPE_PROJ &&
            ddq_type != XFS_DQTYPE_GROUP)
                return __this_address;

        if ((ddq->d_type & XFS_DQTYPE_BIGTIME) &&
            !xfs_has_bigtime(mp))
                return __this_address;

        if ((ddq->d_type & XFS_DQTYPE_BIGTIME) && !ddq->d_id)
                return __this_address;

        if (id != -1 && id != be32_to_cpu(ddq->d_id))
                return __this_address;

        if (!ddq->d_id)
                return NULL;

        if (ddq->d_blk_softlimit &&
            be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
            !ddq->d_btimer)
                return __this_address;

        if (ddq->d_ino_softlimit &&
            be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
            !ddq->d_itimer)
                return __this_address;

        if (ddq->d_rtb_softlimit &&
            be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
            !ddq->d_rtbtimer)
                return __this_address;

        return NULL;
}
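
/*
 * A non-NULL xfs_failaddr_t from the verifier above is the code address of
 * the check that failed (captured by __this_address); the buffer verifiers
 * below feed it to xfs_buf_verifier_error() so a corruption report can
 * identify the exact check that tripped.
 */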

xfs_failaddr_t
xfs_dqblk_verify(
        struct xfs_mount        *mp,
        struct xfs_dqblk        *dqb,
        xfs_dqid_t              id)     /* used only during quotacheck */
{
        if (xfs_has_crc(mp) &&
            !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
                return __this_address;

        return xfs_dquot_verify(mp, &dqb->dd_diskdq, id);
}

/*
 * Do some primitive error checking on ondisk dquot data structures.
 */
void
xfs_dqblk_repair(
        struct xfs_mount        *mp,
        struct xfs_dqblk        *dqb,
        xfs_dqid_t              id,
        xfs_dqtype_t            type)
{
        /*
         * Typically, a repair is only requested by quotacheck.
         */
        ASSERT(id != -1);
        memset(dqb, 0, sizeof(struct xfs_dqblk));

        dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
        dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
        dqb->dd_diskdq.d_type = type;
        dqb->dd_diskdq.d_id = cpu_to_be32(id);

        if (xfs_has_crc(mp)) {
                uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
                xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
                                 XFS_DQUOT_CRC_OFF);
        }
}
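
/*
 * On V5 (CRC-enabled) filesystems each xfs_dqblk carries its own CRC32c
 * checksum: XFS_DQUOT_CRC_OFF is the offset of the dd_crc field within the
 * dqblk, and xfs_update_cksum()/xfs_verify_cksum() compute the checksum
 * over the whole record with that field treated as zero.
 */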

STATIC bool
xfs_dquot_buf_verify_crc(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        bool                    readahead)
{
        struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
        int                     ndquots;
        int                     i;

        if (!xfs_has_crc(mp))
                return true;

        /*
         * if we are in log recovery, the quota subsystem has not been
         * initialised so we have no quotainfo structure. In that case, we need
         * to manually calculate the number of dquots in the buffer.
         */
        if (mp->m_quotainfo)
                ndquots = mp->m_quotainfo->qi_dqperchunk;
        else
                ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

        for (i = 0; i < ndquots; i++, d++) {
                if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
                                      XFS_DQUOT_CRC_OFF)) {
                        if (!readahead)
                                xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
                                        d, sizeof(*d), __this_address);
                        return false;
                }
        }
        return true;
}

STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        bool                    readahead)
{
        struct xfs_dqblk        *dqb = bp->b_addr;
        xfs_failaddr_t          fa;
        xfs_dqid_t              id = 0;
        int                     ndquots;
        int                     i;

        /*
         * if we are in log recovery, the quota subsystem has not been
         * initialised so we have no quotainfo structure. In that case, we need
         * to manually calculate the number of dquots in the buffer.
         */
        if (mp->m_quotainfo)
                ndquots = mp->m_quotainfo->qi_dqperchunk;
        else
                ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

        /*
         * On the first read of the buffer, verify that each dquot is valid.
         * We don't know what the id of the dquot is supposed to be, just that
         * they should be increasing monotonically within the buffer. If the
         * first id is corrupt, then it will fail on the second dquot in the
         * buffer so corruptions could point to the wrong dquot in this case.
         */
        for (i = 0; i < ndquots; i++) {
                struct xfs_disk_dquot   *ddq;

                ddq = &dqb[i].dd_diskdq;

                if (i == 0)
                        id = be32_to_cpu(ddq->d_id);

                fa = xfs_dqblk_verify(mp, &dqb[i], id + i);
                if (fa) {
                        if (!readahead)
                                xfs_buf_verifier_error(bp, -EFSCORRUPTED,
                                                __func__, &dqb[i],
                                                sizeof(struct xfs_dqblk), fa);
                        return fa;
                }
        }

        return NULL;
}
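
/*
 * For example, if the first dquot in a 30-dquot chunk has id 120, the loop
 * above expects ids 120, 121, ..., 149 in order; any dquot whose id does not
 * match "first id + index" is flagged as corrupt.
 */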

static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;

        return xfs_dquot_buf_verify(mp, bp, false);
}

static void
xfs_dquot_buf_read_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;

        if (!xfs_dquot_buf_verify_crc(mp, bp, false))
                return;
        xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;

        if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
            xfs_dquot_buf_verify(mp, bp, true) != NULL) {
                xfs_buf_ioerror(bp, -EIO);
                bp->b_flags &= ~XBF_DONE;
        }
}

/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;

        xfs_dquot_buf_verify(mp, bp, false);
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
        .name = "xfs_dquot",
        .magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
                     cpu_to_be16(XFS_DQUOT_MAGIC) },
        .verify_read = xfs_dquot_buf_read_verify,
        .verify_write = xfs_dquot_buf_write_verify,
        .verify_struct = xfs_dquot_buf_verify_struct,
};

const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
        .name = "xfs_dquot_ra",
        .magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
                     cpu_to_be16(XFS_DQUOT_MAGIC) },
        .verify_read = xfs_dquot_buf_readahead_verify,
        .verify_write = xfs_dquot_buf_write_verify,
};
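
/*
 * The two magic16 slots in the ops above correspond to the pre-V5 and V5
 * on-disk formats respectively; dquot blocks use the same XFS_DQUOT_MAGIC
 * in both, which is why the value is repeated.
 */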

/* Convert an on-disk timer value into an incore timer value. */
time64_t
xfs_dquot_from_disk_ts(
        struct xfs_disk_dquot   *ddq,
        __be32                  dtimer)
{
        uint32_t                t = be32_to_cpu(dtimer);

        if (t != 0 && (ddq->d_type & XFS_DQTYPE_BIGTIME))
                return xfs_dq_bigtime_to_unix(t);

        return t;
}

/* Convert an incore timer value into an on-disk timer value. */
__be32
xfs_dquot_to_disk_ts(
        struct xfs_dquot        *dqp,
        time64_t                timer)
{
        uint32_t                t = timer;

        if (timer != 0 && (dqp->q_type & XFS_DQTYPE_BIGTIME))
                t = xfs_dq_unix_to_bigtime(timer);

        return cpu_to_be32(t);
}
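
/*
 * With XFS_DQTYPE_BIGTIME set, the 32-bit on-disk timer stores the unix
 * expiry time right-shifted by XFS_DQ_BIGTIME_SHIFT (i.e. in units of a few
 * seconds rather than one), which pushes the representable quota expiration
 * range far beyond the classic 32-bit limit of 2038 at the cost of a
 * coarser timer granularity.
 */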

inline unsigned int
xfs_dqinode_sick_mask(xfs_dqtype_t type)
{
        switch (type) {
        case XFS_DQTYPE_USER:
                return XFS_SICK_FS_UQUOTA;
        case XFS_DQTYPE_GROUP:
                return XFS_SICK_FS_GQUOTA;
        case XFS_DQTYPE_PROJ:
                return XFS_SICK_FS_PQUOTA;
        }

        ASSERT(0);
        return 0;
}

/*
 * Load the inode for a given type of quota, assuming that the sb fields have
 * been sorted out. This is not true when switching quota types on a V4
 * filesystem, so do not use this function for that. If metadir is enabled,
 * @dp must be the /quota metadir.
 *
 * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
 * success; or a negative errno.
 */
int
xfs_dqinode_load(
        struct xfs_trans        *tp,
        struct xfs_inode        *dp,
        xfs_dqtype_t            type,
        struct xfs_inode        **ipp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        struct xfs_inode        *ip;
        enum xfs_metafile_type  metafile_type = xfs_dqinode_metafile_type(type);
        int                     error;

        if (!xfs_has_metadir(mp)) {
                xfs_ino_t       ino;

                switch (type) {
                case XFS_DQTYPE_USER:
                        ino = mp->m_sb.sb_uquotino;
                        break;
                case XFS_DQTYPE_GROUP:
                        ino = mp->m_sb.sb_gquotino;
                        break;
                case XFS_DQTYPE_PROJ:
                        ino = mp->m_sb.sb_pquotino;
                        break;
                default:
                        ASSERT(0);
                        return -EFSCORRUPTED;
                }

                /* Should have set 0 to NULLFSINO when loading superblock */
                if (ino == NULLFSINO)
                        return -ENOENT;

                error = xfs_trans_metafile_iget(tp, ino, metafile_type, &ip);
        } else {
                error = xfs_metadir_load(tp, dp, xfs_dqinode_path(type),
                                metafile_type, &ip);
                if (error == -ENOENT)
                        return error;
        }
        if (error) {
                if (xfs_metadata_is_sick(error))
                        xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
                return error;
        }

        if (XFS_IS_CORRUPT(mp, ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
                               ip->i_df.if_format != XFS_DINODE_FMT_BTREE)) {
                xfs_irele(ip);
                xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
                return -EFSCORRUPTED;
        }

        if (XFS_IS_CORRUPT(mp, ip->i_projid != 0)) {
                xfs_irele(ip);
                xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
                return -EFSCORRUPTED;
        }

        *ipp = ip;
        return 0;
}

/* Create a metadata directory quota inode. */
int
xfs_dqinode_metadir_create(
        struct xfs_inode        *dp,
        xfs_dqtype_t            type,
        struct xfs_inode        **ipp)
{
        struct xfs_metadir_update       upd = {
                .dp                     = dp,
                .metafile_type          = xfs_dqinode_metafile_type(type),
                .path                   = xfs_dqinode_path(type),
        };
        int                     error;

        error = xfs_metadir_start_create(&upd);
        if (error)
                return error;

        error = xfs_metadir_create(&upd, S_IFREG);
        if (error)
                return error;

        xfs_trans_log_inode(upd.tp, upd.ip, XFS_ILOG_CORE);

        error = xfs_metadir_commit(&upd);
        if (error)
                return error;

        xfs_finish_inode_setup(upd.ip);
        *ipp = upd.ip;
        return 0;
}
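
/*
 * The function above follows the metadir update sequence used in this file:
 * start the update (which provides the transaction in upd.tp), create the
 * quota file as a regular file, log the new inode core, commit the update,
 * and only then finish setting up the inode and hand it back to the caller.
 */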

#ifndef __KERNEL__
/* Link a metadata directory quota inode. */
int
xfs_dqinode_metadir_link(
        struct xfs_inode        *dp,
        xfs_dqtype_t            type,
        struct xfs_inode        *ip)
{
        struct xfs_metadir_update       upd = {
                .dp                     = dp,
                .metafile_type          = xfs_dqinode_metafile_type(type),
                .path                   = xfs_dqinode_path(type),
                .ip                     = ip,
        };
        int                     error;

        error = xfs_metadir_start_link(&upd);
        if (error)
                return error;

        error = xfs_metadir_link(&upd);
        if (error)
                return error;

        xfs_trans_log_inode(upd.tp, upd.ip, XFS_ILOG_CORE);

        return xfs_metadir_commit(&upd);
}
#endif /* __KERNEL__ */

/* Create the parent directory for all quota inodes and load it. */
int
xfs_dqinode_mkdir_parent(
        struct xfs_mount        *mp,
        struct xfs_inode        **dpp)
{
        if (!mp->m_metadirip) {
                xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
                return -EFSCORRUPTED;
        }

        return xfs_metadir_mkdir(mp->m_metadirip, "quota", dpp);
}

/*
 * Load the parent directory of all quota inodes. Pass the inode to the caller
 * because quota functions (e.g. QUOTARM) can be called on the quota files even
 * if quotas are not enabled.
 */
int
xfs_dqinode_load_parent(
        struct xfs_trans        *tp,
        struct xfs_inode        **dpp)
{
        struct xfs_mount        *mp = tp->t_mountp;

        if (!mp->m_metadirip) {
                xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
                return -EFSCORRUPTED;
        }

        return xfs_metadir_load(tp, mp->m_metadirip, "quota", XFS_METAFILE_DIR,
                        dpp);
}
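
/*
 * Hypothetical caller sketch (not taken from this file): on a metadir
 * filesystem the /quota directory is loaded first and then passed to
 * xfs_dqinode_load() as @dp, e.g.:
 *
 *      struct xfs_inode        *dp = NULL, *ip = NULL;
 *      int                     error;
 *
 *      error = xfs_dqinode_load_parent(tp, &dp);
 *      if (!error) {
 *              error = xfs_dqinode_load(tp, dp, XFS_DQTYPE_USER, &ip);
 *              xfs_irele(dp);
 *      }
 *
 * On pre-metadir filesystems @dp is ignored and the sb_uquotino /
 * sb_gquotino / sb_pquotino superblock fields are used instead, so -ENOENT
 * simply means that quota inode has not been allocated yet.
 */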