// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"
#include "xfs_health.h"
#include "xfs_metadir.h"
#include "xfs_metafile.h"
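
/*
 * Work out how many ondisk dquot records fit in a buffer of @nbblks 512-byte
 * basic blocks: convert the length to bytes and divide by the size of one
 * ondisk dquot block.
 */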
int
xfs_calc_dquots_per_chunk(
	unsigned int		nbblks)	/* basic block units */
{
	ASSERT(nbblks > 0);
	return BBTOB(nbblks) / sizeof(struct xfs_dqblk);
}

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */

xfs_failaddr_t
xfs_dquot_verify(
	struct xfs_mount	*mp,
	struct xfs_disk_dquot	*ddq,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	__u8			ddq_type;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
		return __this_address;
	if (ddq->d_version != XFS_DQUOT_VERSION)
		return __this_address;

	if (ddq->d_type & ~XFS_DQTYPE_ANY)
		return __this_address;
	ddq_type = ddq->d_type & XFS_DQTYPE_REC_MASK;
	if (ddq_type != XFS_DQTYPE_USER &&
	    ddq_type != XFS_DQTYPE_PROJ &&
	    ddq_type != XFS_DQTYPE_GROUP)
		return __this_address;

	if ((ddq->d_type & XFS_DQTYPE_BIGTIME) &&
	    !xfs_has_bigtime(mp))
		return __this_address;

	if ((ddq->d_type & XFS_DQTYPE_BIGTIME) && !ddq->d_id)
		return __this_address;

	if (id != -1 && id != be32_to_cpu(ddq->d_id))
		return __this_address;

	if (!ddq->d_id)
		return NULL;

	if (ddq->d_blk_softlimit &&
	    be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
	    !ddq->d_btimer)
		return __this_address;

	if (ddq->d_ino_softlimit &&
	    be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
	    !ddq->d_itimer)
		return __this_address;

	if (ddq->d_rtb_softlimit &&
	    be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
	    !ddq->d_rtbtimer)
		return __this_address;

	return NULL;
}
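
/*
 * Check a full ondisk dquot block: on a V5 (CRC-enabled) filesystem the block
 * must carry this filesystem's metadata UUID; then check the embedded dquot
 * core with xfs_dquot_verify().
 */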
xfs_failaddr_t
xfs_dqblk_verify(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	if (xfs_has_crc(mp) &&
	    !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return xfs_dquot_verify(mp, &dqb->dd_diskdq, id);
}

/*
 * Do some primitive error recovery on an ondisk dquot: reset the block to a
 * freshly initialised state for the given id and type, and recompute the CRC.
 */
void
xfs_dqblk_repair(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	memset(dqb, 0, sizeof(struct xfs_dqblk));

	dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	dqb->dd_diskdq.d_type = type;
	dqb->dd_diskdq.d_id = cpu_to_be32(id);

	if (xfs_has_crc(mp)) {
		uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}
}
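
/*
 * Check the CRC of every dquot record in the buffer.  For readahead we stay
 * quiet about failures; the blocking read that follows will re-run the
 * verifier and report the error.
 */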
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_has_crc(mp))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF)) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
						d, sizeof(*d), __this_address);
			return false;
		}
	}
	return true;
}
STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*dqb = bp->b_addr;
	xfs_failaddr_t		fa;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;

		ddq = &dqb[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		fa = xfs_dqblk_verify(mp, &dqb[i], id + i);
		if (fa) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSCORRUPTED,
						__func__, &dqb[i],
						sizeof(struct xfs_dqblk), fa);
			return fa;
		}
	}

	return NULL;
}

static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	return xfs_dquot_buf_verify(mp, bp, false);
}
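
/*
 * Read verification: check the CRCs first (the CRC helper reports its own
 * verifier error), then run the structural checks on every dquot.
 */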
static void
xfs_dquot_buf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, false))
		return;
	xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
	    xfs_dquot_buf_verify(mp, bp, true) != NULL) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
	}
}

/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	xfs_dquot_buf_verify(mp, bp, false);
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.name = "xfs_dquot",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
	.verify_struct = xfs_dquot_buf_verify_struct,
};
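
/*
 * The readahead variant uses the silent read verifier, so speculative reads
 * do not log errors; the buffer is left !done and a later blocking read
 * re-verifies it with xfs_dquot_buf_ops.
 */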
const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
	.name = "xfs_dquot_ra",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_readahead_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};
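
/*
 * Callers attach one of the ops structures above when reading or prefetching
 * dquot buffers (for example via xfs_trans_read_buf() or xfs_buf_readahead());
 * the read verifier then runs at I/O completion and the write verifier runs
 * just before the buffer is submitted for writeback.
 */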

/* Convert an on-disk timer value into an incore timer value. */
time64_t
xfs_dquot_from_disk_ts(
	struct xfs_disk_dquot	*ddq,
	__be32			dtimer)
{
	uint32_t		t = be32_to_cpu(dtimer);

	if (t != 0 && (ddq->d_type & XFS_DQTYPE_BIGTIME))
		return xfs_dq_bigtime_to_unix(t);

	return t;
}

/* Convert an incore timer value into an on-disk timer value. */
__be32
xfs_dquot_to_disk_ts(
	struct xfs_dquot	*dqp,
	time64_t		timer)
{
	uint32_t		t = timer;

	if (timer != 0 && (dqp->q_type & XFS_DQTYPE_BIGTIME))
		t = xfs_dq_unix_to_bigtime(timer);

	return cpu_to_be32(t);
}
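
/*
 * Return the per-filesystem health flag to raise when the quota inode for
 * this quota type turns out to be corrupt.
 */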
static inline unsigned int
xfs_dqinode_sick_mask(xfs_dqtype_t type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return XFS_SICK_FS_UQUOTA;
	case XFS_DQTYPE_GROUP:
		return XFS_SICK_FS_GQUOTA;
	case XFS_DQTYPE_PROJ:
		return XFS_SICK_FS_PQUOTA;
	default:
		ASSERT(0);
		return 0;
	}
}

/*
 * Load the inode for a given type of quota, assuming that the sb fields have
 * been sorted out. This is not true when switching quota types on a V4
 * filesystem, so do not use this function for that. If metadir is enabled,
 * @dp must be the /quota metadir.
 *
 * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
 * success; or a negative errno.
 */
int
xfs_dqinode_load(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dqtype_t		type,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	enum xfs_metafile_type	metafile_type = xfs_dqinode_metafile_type(type);
	int			error;

	if (!xfs_has_metadir(mp)) {
		xfs_ino_t	ino;

		switch (type) {
		case XFS_DQTYPE_USER:
			ino = mp->m_sb.sb_uquotino;
			break;
		case XFS_DQTYPE_GROUP:
			ino = mp->m_sb.sb_gquotino;
			break;
		case XFS_DQTYPE_PROJ:
			ino = mp->m_sb.sb_pquotino;
			break;
		default:
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/* Should have set 0 to NULLFSINO when loading superblock */
		if (ino == NULLFSINO)
			return -ENOENT;

		error = xfs_trans_metafile_iget(tp, ino, metafile_type, &ip);
	} else {
		error = xfs_metadir_load(tp, dp, xfs_dqinode_path(type),
				metafile_type, &ip);
		if (error == -ENOENT)
			return error;
	}
	if (error) {
		if (xfs_metadata_is_sick(error))
			xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
		return error;
	}

	if (XFS_IS_CORRUPT(mp, ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
			       ip->i_df.if_format != XFS_DINODE_FMT_BTREE)) {
		xfs_irele(ip);
		xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
		return -EFSCORRUPTED;
	}

	if (XFS_IS_CORRUPT(mp, ip->i_projid != 0)) {
		xfs_irele(ip);
		xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
		return -EFSCORRUPTED;
	}

	*ipp = ip;
	return 0;
}

/* Create a metadata directory quota inode. */
int
xfs_dqinode_metadir_create(
	struct xfs_inode	*dp,
	xfs_dqtype_t		type,
	struct xfs_inode	**ipp)
{
	struct xfs_metadir_update	upd = {
		.dp			= dp,
		.metafile_type		= xfs_dqinode_metafile_type(type),
		.path			= xfs_dqinode_path(type),
	};
	int				error;

	error = xfs_metadir_start_create(&upd);
	if (error)
		return error;

	error = xfs_metadir_create(&upd, S_IFREG);
	if (error)
		return error;

	xfs_trans_log_inode(upd.tp, upd.ip, XFS_ILOG_CORE);

	error = xfs_metadir_commit(&upd);
	if (error)
		return error;

	xfs_finish_inode_setup(upd.ip);
	*ipp = upd.ip;
	return 0;
}

/* Link a metadata directory quota inode. */
int
xfs_dqinode_metadir_link(
	struct xfs_inode	*dp,
	xfs_dqtype_t		type,
	struct xfs_inode	*ip)
{
	struct xfs_metadir_update	upd = {
		.dp			= dp,
		.metafile_type		= xfs_dqinode_metafile_type(type),
		.path			= xfs_dqinode_path(type),
		.ip			= ip,
	};
	int				error;

	error = xfs_metadir_start_link(&upd);
	if (error)
		return error;

	error = xfs_metadir_link(&upd);
	if (error)
		return error;

	xfs_trans_log_inode(upd.tp, upd.ip, XFS_ILOG_CORE);

	return xfs_metadir_commit(&upd);
}
#endif /* __KERNEL__ */

/* Create the parent directory for all quota inodes and load it. */
int
xfs_dqinode_mkdir_parent(
	struct xfs_mount	*mp,
	struct xfs_inode	**dpp)
{
	if (!mp->m_metadirip) {
		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
		return -EFSCORRUPTED;
	}

	return xfs_metadir_mkdir(mp->m_metadirip, "quota", dpp);
}

/*
 * Load the parent directory of all quota inodes. Pass the inode to the caller
 * because quota functions (e.g. QUOTARM) can be called on the quota files even
 * if quotas are not enabled.
 */
int
xfs_dqinode_load_parent(
	struct xfs_trans	*tp,
	struct xfs_inode	**dpp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!mp->m_metadirip) {
		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
		return -EFSCORRUPTED;
	}

	return xfs_metadir_load(tp, mp->m_metadirip, "quota", XFS_METAFILE_DIR,
			dpp);
}