1 // SPDX-License-Identifier: GPL-2.0
/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel. The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */
10 #include <linux/namei.h>
11 #include <linux/slab.h>
12 #include <asm/current.h>
13 #include <linux/blkdev.h>
14 #include <linux/uaccess.h>
15 #include <linux/kernel.h>
16 #include <linux/security.h>
17 #include <linux/syscalls.h>
18 #include <linux/capability.h>
19 #include <linux/quotaops.h>
20 #include <linux/types.h>
21 #include <linux/mount.h>
22 #include <linux/writeback.h>
23 #include <linux/nospec.h>
25 #include "../internal.h"
27 static int check_quotactl_permission(struct super_block
*sb
, int type
, int cmd
,
31 /* these commands do not require any special privilegues */
39 /* allow to query information for dquots we "own" */
42 if ((type
== USRQUOTA
&& uid_eq(current_euid(), make_kuid(current_user_ns(), id
))) ||
43 (type
== GRPQUOTA
&& in_egroup_p(make_kgid(current_user_ns(), id
))))
47 if (!capable(CAP_SYS_ADMIN
))
51 return security_quotactl(cmd
, type
, id
, sb
);
54 static void quota_sync_one(struct super_block
*sb
, void *arg
)
56 int type
= *(int *)arg
;
58 if (sb
->s_qcop
&& sb
->s_qcop
->quota_sync
&&
59 (sb
->s_quota_types
& (1 << type
)))
60 sb
->s_qcop
->quota_sync(sb
, type
);
63 static int quota_sync_all(int type
)
67 ret
= security_quotactl(Q_SYNC
, type
, 0, NULL
);
69 iterate_supers(quota_sync_one
, &type
);
73 unsigned int qtype_enforce_flag(int type
)
77 return FS_QUOTA_UDQ_ENFD
;
79 return FS_QUOTA_GDQ_ENFD
;
81 return FS_QUOTA_PDQ_ENFD
;
86 static int quota_quotaon(struct super_block
*sb
, int type
, qid_t id
,
87 const struct path
*path
)
89 if (!sb
->s_qcop
->quota_on
&& !sb
->s_qcop
->quota_enable
)
91 if (sb
->s_qcop
->quota_enable
)
92 return sb
->s_qcop
->quota_enable(sb
, qtype_enforce_flag(type
));
95 return sb
->s_qcop
->quota_on(sb
, type
, id
, path
);
98 static int quota_quotaoff(struct super_block
*sb
, int type
)
100 if (!sb
->s_qcop
->quota_off
&& !sb
->s_qcop
->quota_disable
)
102 if (sb
->s_qcop
->quota_disable
)
103 return sb
->s_qcop
->quota_disable(sb
, qtype_enforce_flag(type
));
104 return sb
->s_qcop
->quota_off(sb
, type
);
107 static int quota_getfmt(struct super_block
*sb
, int type
, void __user
*addr
)
111 if (!sb_has_quota_active(sb
, type
))
113 fmt
= sb_dqopt(sb
)->info
[type
].dqi_format
->qf_fmt_id
;
114 if (copy_to_user(addr
, &fmt
, sizeof(fmt
)))
119 static int quota_getinfo(struct super_block
*sb
, int type
, void __user
*addr
)
121 struct qc_state state
;
122 struct qc_type_state
*tstate
;
123 struct if_dqinfo uinfo
;
126 if (!sb
->s_qcop
->get_state
)
128 ret
= sb
->s_qcop
->get_state(sb
, &state
);
131 tstate
= state
.s_state
+ type
;
132 if (!(tstate
->flags
& QCI_ACCT_ENABLED
))
134 memset(&uinfo
, 0, sizeof(uinfo
));
135 uinfo
.dqi_bgrace
= tstate
->spc_timelimit
;
136 uinfo
.dqi_igrace
= tstate
->ino_timelimit
;
137 if (tstate
->flags
& QCI_SYSFILE
)
138 uinfo
.dqi_flags
|= DQF_SYS_FILE
;
139 if (tstate
->flags
& QCI_ROOT_SQUASH
)
140 uinfo
.dqi_flags
|= DQF_ROOT_SQUASH
;
141 uinfo
.dqi_valid
= IIF_ALL
;
142 if (copy_to_user(addr
, &uinfo
, sizeof(uinfo
)))
147 static int quota_setinfo(struct super_block
*sb
, int type
, void __user
*addr
)
149 struct if_dqinfo info
;
150 struct qc_info qinfo
;
152 if (copy_from_user(&info
, addr
, sizeof(info
)))
154 if (!sb
->s_qcop
->set_info
)
156 if (info
.dqi_valid
& ~(IIF_FLAGS
| IIF_BGRACE
| IIF_IGRACE
))
158 memset(&qinfo
, 0, sizeof(qinfo
));
159 if (info
.dqi_valid
& IIF_FLAGS
) {
160 if (info
.dqi_flags
& ~DQF_SETINFO_MASK
)
162 if (info
.dqi_flags
& DQF_ROOT_SQUASH
)
163 qinfo
.i_flags
|= QCI_ROOT_SQUASH
;
164 qinfo
.i_fieldmask
|= QC_FLAGS
;
166 if (info
.dqi_valid
& IIF_BGRACE
) {
167 qinfo
.i_spc_timelimit
= info
.dqi_bgrace
;
168 qinfo
.i_fieldmask
|= QC_SPC_TIMER
;
170 if (info
.dqi_valid
& IIF_IGRACE
) {
171 qinfo
.i_ino_timelimit
= info
.dqi_igrace
;
172 qinfo
.i_fieldmask
|= QC_INO_TIMER
;
174 return sb
->s_qcop
->set_info(sb
, type
, &qinfo
);
177 static inline qsize_t
qbtos(qsize_t blocks
)
179 return blocks
<< QIF_DQBLKSIZE_BITS
;
182 static inline qsize_t
stoqb(qsize_t space
)
184 return (space
+ QIF_DQBLKSIZE
- 1) >> QIF_DQBLKSIZE_BITS
;
187 static void copy_to_if_dqblk(struct if_dqblk
*dst
, struct qc_dqblk
*src
)
189 memset(dst
, 0, sizeof(*dst
));
190 dst
->dqb_bhardlimit
= stoqb(src
->d_spc_hardlimit
);
191 dst
->dqb_bsoftlimit
= stoqb(src
->d_spc_softlimit
);
192 dst
->dqb_curspace
= src
->d_space
;
193 dst
->dqb_ihardlimit
= src
->d_ino_hardlimit
;
194 dst
->dqb_isoftlimit
= src
->d_ino_softlimit
;
195 dst
->dqb_curinodes
= src
->d_ino_count
;
196 dst
->dqb_btime
= src
->d_spc_timer
;
197 dst
->dqb_itime
= src
->d_ino_timer
;
198 dst
->dqb_valid
= QIF_ALL
;
201 static int quota_getquota(struct super_block
*sb
, int type
, qid_t id
,
209 if (!sb
->s_qcop
->get_dqblk
)
211 qid
= make_kqid(current_user_ns(), type
, id
);
212 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
214 ret
= sb
->s_qcop
->get_dqblk(sb
, qid
, &fdq
);
217 copy_to_if_dqblk(&idq
, &fdq
);
219 if (compat_need_64bit_alignment_fixup()) {
220 struct compat_if_dqblk __user
*compat_dqblk
= addr
;
222 if (copy_to_user(compat_dqblk
, &idq
, sizeof(*compat_dqblk
)))
224 if (put_user(idq
.dqb_valid
, &compat_dqblk
->dqb_valid
))
227 if (copy_to_user(addr
, &idq
, sizeof(idq
)))
234 * Return quota for next active quota >= this id, if any exists,
235 * otherwise return -ENOENT via ->get_nextdqblk
237 static int quota_getnextquota(struct super_block
*sb
, int type
, qid_t id
,
242 struct if_nextdqblk idq
;
245 if (!sb
->s_qcop
->get_nextdqblk
)
247 qid
= make_kqid(current_user_ns(), type
, id
);
248 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
250 ret
= sb
->s_qcop
->get_nextdqblk(sb
, &qid
, &fdq
);
253 /* struct if_nextdqblk is a superset of struct if_dqblk */
254 copy_to_if_dqblk((struct if_dqblk
*)&idq
, &fdq
);
255 idq
.dqb_id
= from_kqid(current_user_ns(), qid
);
256 if (copy_to_user(addr
, &idq
, sizeof(idq
)))
261 static void copy_from_if_dqblk(struct qc_dqblk
*dst
, struct if_dqblk
*src
)
263 dst
->d_spc_hardlimit
= qbtos(src
->dqb_bhardlimit
);
264 dst
->d_spc_softlimit
= qbtos(src
->dqb_bsoftlimit
);
265 dst
->d_space
= src
->dqb_curspace
;
266 dst
->d_ino_hardlimit
= src
->dqb_ihardlimit
;
267 dst
->d_ino_softlimit
= src
->dqb_isoftlimit
;
268 dst
->d_ino_count
= src
->dqb_curinodes
;
269 dst
->d_spc_timer
= src
->dqb_btime
;
270 dst
->d_ino_timer
= src
->dqb_itime
;
272 dst
->d_fieldmask
= 0;
273 if (src
->dqb_valid
& QIF_BLIMITS
)
274 dst
->d_fieldmask
|= QC_SPC_SOFT
| QC_SPC_HARD
;
275 if (src
->dqb_valid
& QIF_SPACE
)
276 dst
->d_fieldmask
|= QC_SPACE
;
277 if (src
->dqb_valid
& QIF_ILIMITS
)
278 dst
->d_fieldmask
|= QC_INO_SOFT
| QC_INO_HARD
;
279 if (src
->dqb_valid
& QIF_INODES
)
280 dst
->d_fieldmask
|= QC_INO_COUNT
;
281 if (src
->dqb_valid
& QIF_BTIME
)
282 dst
->d_fieldmask
|= QC_SPC_TIMER
;
283 if (src
->dqb_valid
& QIF_ITIME
)
284 dst
->d_fieldmask
|= QC_INO_TIMER
;
287 static int quota_setquota(struct super_block
*sb
, int type
, qid_t id
,
294 if (compat_need_64bit_alignment_fixup()) {
295 struct compat_if_dqblk __user
*compat_dqblk
= addr
;
297 if (copy_from_user(&idq
, compat_dqblk
, sizeof(*compat_dqblk
)) ||
298 get_user(idq
.dqb_valid
, &compat_dqblk
->dqb_valid
))
301 if (copy_from_user(&idq
, addr
, sizeof(idq
)))
304 if (!sb
->s_qcop
->set_dqblk
)
306 qid
= make_kqid(current_user_ns(), type
, id
);
307 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
309 copy_from_if_dqblk(&fdq
, &idq
);
310 return sb
->s_qcop
->set_dqblk(sb
, qid
, &fdq
);
313 static int quota_enable(struct super_block
*sb
, void __user
*addr
)
317 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
319 if (!sb
->s_qcop
->quota_enable
)
321 return sb
->s_qcop
->quota_enable(sb
, flags
);
324 static int quota_disable(struct super_block
*sb
, void __user
*addr
)
328 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
330 if (!sb
->s_qcop
->quota_disable
)
332 return sb
->s_qcop
->quota_disable(sb
, flags
);
335 static int quota_state_to_flags(struct qc_state
*state
)
339 if (state
->s_state
[USRQUOTA
].flags
& QCI_ACCT_ENABLED
)
340 flags
|= FS_QUOTA_UDQ_ACCT
;
341 if (state
->s_state
[USRQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
342 flags
|= FS_QUOTA_UDQ_ENFD
;
343 if (state
->s_state
[GRPQUOTA
].flags
& QCI_ACCT_ENABLED
)
344 flags
|= FS_QUOTA_GDQ_ACCT
;
345 if (state
->s_state
[GRPQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
346 flags
|= FS_QUOTA_GDQ_ENFD
;
347 if (state
->s_state
[PRJQUOTA
].flags
& QCI_ACCT_ENABLED
)
348 flags
|= FS_QUOTA_PDQ_ACCT
;
349 if (state
->s_state
[PRJQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
350 flags
|= FS_QUOTA_PDQ_ENFD
;
354 static int quota_getstate(struct super_block
*sb
, int type
,
355 struct fs_quota_stat
*fqs
)
357 struct qc_state state
;
360 memset(&state
, 0, sizeof (struct qc_state
));
361 ret
= sb
->s_qcop
->get_state(sb
, &state
);
365 memset(fqs
, 0, sizeof(*fqs
));
366 fqs
->qs_version
= FS_QSTAT_VERSION
;
367 fqs
->qs_flags
= quota_state_to_flags(&state
);
368 /* No quota enabled? */
371 fqs
->qs_incoredqs
= state
.s_incoredqs
;
373 fqs
->qs_btimelimit
= state
.s_state
[type
].spc_timelimit
;
374 fqs
->qs_itimelimit
= state
.s_state
[type
].ino_timelimit
;
375 fqs
->qs_rtbtimelimit
= state
.s_state
[type
].rt_spc_timelimit
;
376 fqs
->qs_bwarnlimit
= state
.s_state
[type
].spc_warnlimit
;
377 fqs
->qs_iwarnlimit
= state
.s_state
[type
].ino_warnlimit
;
379 /* Inodes may be allocated even if inactive; copy out if present */
380 if (state
.s_state
[USRQUOTA
].ino
) {
381 fqs
->qs_uquota
.qfs_ino
= state
.s_state
[USRQUOTA
].ino
;
382 fqs
->qs_uquota
.qfs_nblks
= state
.s_state
[USRQUOTA
].blocks
;
383 fqs
->qs_uquota
.qfs_nextents
= state
.s_state
[USRQUOTA
].nextents
;
385 if (state
.s_state
[GRPQUOTA
].ino
) {
386 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[GRPQUOTA
].ino
;
387 fqs
->qs_gquota
.qfs_nblks
= state
.s_state
[GRPQUOTA
].blocks
;
388 fqs
->qs_gquota
.qfs_nextents
= state
.s_state
[GRPQUOTA
].nextents
;
390 if (state
.s_state
[PRJQUOTA
].ino
) {
392 * Q_XGETQSTAT doesn't have room for both group and project
393 * quotas. So, allow the project quota values to be copied out
394 * only if there is no group quota information available.
396 if (!(state
.s_state
[GRPQUOTA
].flags
& QCI_ACCT_ENABLED
)) {
397 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[PRJQUOTA
].ino
;
398 fqs
->qs_gquota
.qfs_nblks
=
399 state
.s_state
[PRJQUOTA
].blocks
;
400 fqs
->qs_gquota
.qfs_nextents
=
401 state
.s_state
[PRJQUOTA
].nextents
;
407 static int compat_copy_fs_qfilestat(struct compat_fs_qfilestat __user
*to
,
408 struct fs_qfilestat
*from
)
410 if (copy_to_user(to
, from
, sizeof(*to
)) ||
411 put_user(from
->qfs_nextents
, &to
->qfs_nextents
))
416 static int compat_copy_fs_quota_stat(struct compat_fs_quota_stat __user
*to
,
417 struct fs_quota_stat
*from
)
419 if (put_user(from
->qs_version
, &to
->qs_version
) ||
420 put_user(from
->qs_flags
, &to
->qs_flags
) ||
421 put_user(from
->qs_pad
, &to
->qs_pad
) ||
422 compat_copy_fs_qfilestat(&to
->qs_uquota
, &from
->qs_uquota
) ||
423 compat_copy_fs_qfilestat(&to
->qs_gquota
, &from
->qs_gquota
) ||
424 put_user(from
->qs_incoredqs
, &to
->qs_incoredqs
) ||
425 put_user(from
->qs_btimelimit
, &to
->qs_btimelimit
) ||
426 put_user(from
->qs_itimelimit
, &to
->qs_itimelimit
) ||
427 put_user(from
->qs_rtbtimelimit
, &to
->qs_rtbtimelimit
) ||
428 put_user(from
->qs_bwarnlimit
, &to
->qs_bwarnlimit
) ||
429 put_user(from
->qs_iwarnlimit
, &to
->qs_iwarnlimit
))
434 static int quota_getxstate(struct super_block
*sb
, int type
, void __user
*addr
)
436 struct fs_quota_stat fqs
;
439 if (!sb
->s_qcop
->get_state
)
441 ret
= quota_getstate(sb
, type
, &fqs
);
445 if (compat_need_64bit_alignment_fixup())
446 return compat_copy_fs_quota_stat(addr
, &fqs
);
447 if (copy_to_user(addr
, &fqs
, sizeof(fqs
)))
452 static int quota_getstatev(struct super_block
*sb
, int type
,
453 struct fs_quota_statv
*fqs
)
455 struct qc_state state
;
458 memset(&state
, 0, sizeof (struct qc_state
));
459 ret
= sb
->s_qcop
->get_state(sb
, &state
);
463 memset(fqs
, 0, sizeof(*fqs
));
464 fqs
->qs_version
= FS_QSTAT_VERSION
;
465 fqs
->qs_flags
= quota_state_to_flags(&state
);
466 /* No quota enabled? */
469 fqs
->qs_incoredqs
= state
.s_incoredqs
;
471 fqs
->qs_btimelimit
= state
.s_state
[type
].spc_timelimit
;
472 fqs
->qs_itimelimit
= state
.s_state
[type
].ino_timelimit
;
473 fqs
->qs_rtbtimelimit
= state
.s_state
[type
].rt_spc_timelimit
;
474 fqs
->qs_bwarnlimit
= state
.s_state
[type
].spc_warnlimit
;
475 fqs
->qs_iwarnlimit
= state
.s_state
[type
].ino_warnlimit
;
476 fqs
->qs_rtbwarnlimit
= state
.s_state
[type
].rt_spc_warnlimit
;
478 /* Inodes may be allocated even if inactive; copy out if present */
479 if (state
.s_state
[USRQUOTA
].ino
) {
480 fqs
->qs_uquota
.qfs_ino
= state
.s_state
[USRQUOTA
].ino
;
481 fqs
->qs_uquota
.qfs_nblks
= state
.s_state
[USRQUOTA
].blocks
;
482 fqs
->qs_uquota
.qfs_nextents
= state
.s_state
[USRQUOTA
].nextents
;
484 if (state
.s_state
[GRPQUOTA
].ino
) {
485 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[GRPQUOTA
].ino
;
486 fqs
->qs_gquota
.qfs_nblks
= state
.s_state
[GRPQUOTA
].blocks
;
487 fqs
->qs_gquota
.qfs_nextents
= state
.s_state
[GRPQUOTA
].nextents
;
489 if (state
.s_state
[PRJQUOTA
].ino
) {
490 fqs
->qs_pquota
.qfs_ino
= state
.s_state
[PRJQUOTA
].ino
;
491 fqs
->qs_pquota
.qfs_nblks
= state
.s_state
[PRJQUOTA
].blocks
;
492 fqs
->qs_pquota
.qfs_nextents
= state
.s_state
[PRJQUOTA
].nextents
;
497 static int quota_getxstatev(struct super_block
*sb
, int type
, void __user
*addr
)
499 struct fs_quota_statv fqs
;
502 if (!sb
->s_qcop
->get_state
)
505 memset(&fqs
, 0, sizeof(fqs
));
506 if (copy_from_user(&fqs
, addr
, 1)) /* Just read qs_version */
509 /* If this kernel doesn't support user specified version, fail */
510 switch (fqs
.qs_version
) {
511 case FS_QSTATV_VERSION1
:
516 ret
= quota_getstatev(sb
, type
, &fqs
);
517 if (!ret
&& copy_to_user(addr
, &fqs
, sizeof(fqs
)))
523 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
524 * out of there as xfsprogs rely on definitions being in that header file. So
525 * just define same functions here for quota purposes.
527 #define XFS_BB_SHIFT 9
529 static inline u64
quota_bbtob(u64 blocks
)
531 return blocks
<< XFS_BB_SHIFT
;
534 static inline u64
quota_btobb(u64 bytes
)
536 return (bytes
+ (1 << XFS_BB_SHIFT
) - 1) >> XFS_BB_SHIFT
;
539 static inline s64
copy_from_xfs_dqblk_ts(const struct fs_disk_quota
*d
,
540 __s32 timer
, __s8 timer_hi
)
542 if (d
->d_fieldmask
& FS_DQ_BIGTIME
)
543 return (u32
)timer
| (s64
)timer_hi
<< 32;
547 static void copy_from_xfs_dqblk(struct qc_dqblk
*dst
, struct fs_disk_quota
*src
)
549 dst
->d_spc_hardlimit
= quota_bbtob(src
->d_blk_hardlimit
);
550 dst
->d_spc_softlimit
= quota_bbtob(src
->d_blk_softlimit
);
551 dst
->d_ino_hardlimit
= src
->d_ino_hardlimit
;
552 dst
->d_ino_softlimit
= src
->d_ino_softlimit
;
553 dst
->d_space
= quota_bbtob(src
->d_bcount
);
554 dst
->d_ino_count
= src
->d_icount
;
555 dst
->d_ino_timer
= copy_from_xfs_dqblk_ts(src
, src
->d_itimer
,
557 dst
->d_spc_timer
= copy_from_xfs_dqblk_ts(src
, src
->d_btimer
,
559 dst
->d_ino_warns
= src
->d_iwarns
;
560 dst
->d_spc_warns
= src
->d_bwarns
;
561 dst
->d_rt_spc_hardlimit
= quota_bbtob(src
->d_rtb_hardlimit
);
562 dst
->d_rt_spc_softlimit
= quota_bbtob(src
->d_rtb_softlimit
);
563 dst
->d_rt_space
= quota_bbtob(src
->d_rtbcount
);
564 dst
->d_rt_spc_timer
= copy_from_xfs_dqblk_ts(src
, src
->d_rtbtimer
,
566 dst
->d_rt_spc_warns
= src
->d_rtbwarns
;
567 dst
->d_fieldmask
= 0;
568 if (src
->d_fieldmask
& FS_DQ_ISOFT
)
569 dst
->d_fieldmask
|= QC_INO_SOFT
;
570 if (src
->d_fieldmask
& FS_DQ_IHARD
)
571 dst
->d_fieldmask
|= QC_INO_HARD
;
572 if (src
->d_fieldmask
& FS_DQ_BSOFT
)
573 dst
->d_fieldmask
|= QC_SPC_SOFT
;
574 if (src
->d_fieldmask
& FS_DQ_BHARD
)
575 dst
->d_fieldmask
|= QC_SPC_HARD
;
576 if (src
->d_fieldmask
& FS_DQ_RTBSOFT
)
577 dst
->d_fieldmask
|= QC_RT_SPC_SOFT
;
578 if (src
->d_fieldmask
& FS_DQ_RTBHARD
)
579 dst
->d_fieldmask
|= QC_RT_SPC_HARD
;
580 if (src
->d_fieldmask
& FS_DQ_BTIMER
)
581 dst
->d_fieldmask
|= QC_SPC_TIMER
;
582 if (src
->d_fieldmask
& FS_DQ_ITIMER
)
583 dst
->d_fieldmask
|= QC_INO_TIMER
;
584 if (src
->d_fieldmask
& FS_DQ_RTBTIMER
)
585 dst
->d_fieldmask
|= QC_RT_SPC_TIMER
;
586 if (src
->d_fieldmask
& FS_DQ_BWARNS
)
587 dst
->d_fieldmask
|= QC_SPC_WARNS
;
588 if (src
->d_fieldmask
& FS_DQ_IWARNS
)
589 dst
->d_fieldmask
|= QC_INO_WARNS
;
590 if (src
->d_fieldmask
& FS_DQ_RTBWARNS
)
591 dst
->d_fieldmask
|= QC_RT_SPC_WARNS
;
592 if (src
->d_fieldmask
& FS_DQ_BCOUNT
)
593 dst
->d_fieldmask
|= QC_SPACE
;
594 if (src
->d_fieldmask
& FS_DQ_ICOUNT
)
595 dst
->d_fieldmask
|= QC_INO_COUNT
;
596 if (src
->d_fieldmask
& FS_DQ_RTBCOUNT
)
597 dst
->d_fieldmask
|= QC_RT_SPACE
;
600 static void copy_qcinfo_from_xfs_dqblk(struct qc_info
*dst
,
601 struct fs_disk_quota
*src
)
603 memset(dst
, 0, sizeof(*dst
));
604 dst
->i_spc_timelimit
= src
->d_btimer
;
605 dst
->i_ino_timelimit
= src
->d_itimer
;
606 dst
->i_rt_spc_timelimit
= src
->d_rtbtimer
;
607 dst
->i_ino_warnlimit
= src
->d_iwarns
;
608 dst
->i_spc_warnlimit
= src
->d_bwarns
;
609 dst
->i_rt_spc_warnlimit
= src
->d_rtbwarns
;
610 if (src
->d_fieldmask
& FS_DQ_BWARNS
)
611 dst
->i_fieldmask
|= QC_SPC_WARNS
;
612 if (src
->d_fieldmask
& FS_DQ_IWARNS
)
613 dst
->i_fieldmask
|= QC_INO_WARNS
;
614 if (src
->d_fieldmask
& FS_DQ_RTBWARNS
)
615 dst
->i_fieldmask
|= QC_RT_SPC_WARNS
;
616 if (src
->d_fieldmask
& FS_DQ_BTIMER
)
617 dst
->i_fieldmask
|= QC_SPC_TIMER
;
618 if (src
->d_fieldmask
& FS_DQ_ITIMER
)
619 dst
->i_fieldmask
|= QC_INO_TIMER
;
620 if (src
->d_fieldmask
& FS_DQ_RTBTIMER
)
621 dst
->i_fieldmask
|= QC_RT_SPC_TIMER
;
624 static int quota_setxquota(struct super_block
*sb
, int type
, qid_t id
,
627 struct fs_disk_quota fdq
;
631 if (copy_from_user(&fdq
, addr
, sizeof(fdq
)))
633 if (!sb
->s_qcop
->set_dqblk
)
635 qid
= make_kqid(current_user_ns(), type
, id
);
636 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
638 /* Are we actually setting timer / warning limits for all users? */
639 if (from_kqid(sb
->s_user_ns
, qid
) == 0 &&
640 fdq
.d_fieldmask
& (FS_DQ_WARNS_MASK
| FS_DQ_TIMER_MASK
)) {
641 struct qc_info qinfo
;
644 if (!sb
->s_qcop
->set_info
)
646 copy_qcinfo_from_xfs_dqblk(&qinfo
, &fdq
);
647 ret
= sb
->s_qcop
->set_info(sb
, type
, &qinfo
);
650 /* These are already done */
651 fdq
.d_fieldmask
&= ~(FS_DQ_WARNS_MASK
| FS_DQ_TIMER_MASK
);
653 copy_from_xfs_dqblk(&qdq
, &fdq
);
654 return sb
->s_qcop
->set_dqblk(sb
, qid
, &qdq
);
657 static inline void copy_to_xfs_dqblk_ts(const struct fs_disk_quota
*d
,
658 __s32
*timer_lo
, __s8
*timer_hi
, s64 timer
)
661 if (d
->d_fieldmask
& FS_DQ_BIGTIME
)
662 *timer_hi
= timer
>> 32;
665 static inline bool want_bigtime(s64 timer
)
667 return timer
> S32_MAX
|| timer
< S32_MIN
;
670 static void copy_to_xfs_dqblk(struct fs_disk_quota
*dst
, struct qc_dqblk
*src
,
673 memset(dst
, 0, sizeof(*dst
));
674 if (want_bigtime(src
->d_ino_timer
) || want_bigtime(src
->d_spc_timer
) ||
675 want_bigtime(src
->d_rt_spc_timer
))
676 dst
->d_fieldmask
|= FS_DQ_BIGTIME
;
677 dst
->d_version
= FS_DQUOT_VERSION
;
679 if (type
== USRQUOTA
)
680 dst
->d_flags
= FS_USER_QUOTA
;
681 else if (type
== PRJQUOTA
)
682 dst
->d_flags
= FS_PROJ_QUOTA
;
684 dst
->d_flags
= FS_GROUP_QUOTA
;
685 dst
->d_blk_hardlimit
= quota_btobb(src
->d_spc_hardlimit
);
686 dst
->d_blk_softlimit
= quota_btobb(src
->d_spc_softlimit
);
687 dst
->d_ino_hardlimit
= src
->d_ino_hardlimit
;
688 dst
->d_ino_softlimit
= src
->d_ino_softlimit
;
689 dst
->d_bcount
= quota_btobb(src
->d_space
);
690 dst
->d_icount
= src
->d_ino_count
;
691 copy_to_xfs_dqblk_ts(dst
, &dst
->d_itimer
, &dst
->d_itimer_hi
,
693 copy_to_xfs_dqblk_ts(dst
, &dst
->d_btimer
, &dst
->d_btimer_hi
,
695 dst
->d_iwarns
= src
->d_ino_warns
;
696 dst
->d_bwarns
= src
->d_spc_warns
;
697 dst
->d_rtb_hardlimit
= quota_btobb(src
->d_rt_spc_hardlimit
);
698 dst
->d_rtb_softlimit
= quota_btobb(src
->d_rt_spc_softlimit
);
699 dst
->d_rtbcount
= quota_btobb(src
->d_rt_space
);
700 copy_to_xfs_dqblk_ts(dst
, &dst
->d_rtbtimer
, &dst
->d_rtbtimer_hi
,
701 src
->d_rt_spc_timer
);
702 dst
->d_rtbwarns
= src
->d_rt_spc_warns
;
705 static int quota_getxquota(struct super_block
*sb
, int type
, qid_t id
,
708 struct fs_disk_quota fdq
;
713 if (!sb
->s_qcop
->get_dqblk
)
715 qid
= make_kqid(current_user_ns(), type
, id
);
716 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
718 ret
= sb
->s_qcop
->get_dqblk(sb
, qid
, &qdq
);
721 copy_to_xfs_dqblk(&fdq
, &qdq
, type
, id
);
722 if (copy_to_user(addr
, &fdq
, sizeof(fdq
)))
728 * Return quota for next active quota >= this id, if any exists,
729 * otherwise return -ENOENT via ->get_nextdqblk.
731 static int quota_getnextxquota(struct super_block
*sb
, int type
, qid_t id
,
734 struct fs_disk_quota fdq
;
740 if (!sb
->s_qcop
->get_nextdqblk
)
742 qid
= make_kqid(current_user_ns(), type
, id
);
743 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
745 ret
= sb
->s_qcop
->get_nextdqblk(sb
, &qid
, &qdq
);
748 id_out
= from_kqid(current_user_ns(), qid
);
749 copy_to_xfs_dqblk(&fdq
, &qdq
, type
, id_out
);
750 if (copy_to_user(addr
, &fdq
, sizeof(fdq
)))
755 static int quota_rmxquota(struct super_block
*sb
, void __user
*addr
)
759 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
761 if (!sb
->s_qcop
->rm_xquota
)
763 return sb
->s_qcop
->rm_xquota(sb
, flags
);
766 /* Copy parameters and call proper function */
767 static int do_quotactl(struct super_block
*sb
, int type
, int cmd
, qid_t id
,
768 void __user
*addr
, const struct path
*path
)
772 type
= array_index_nospec(type
, MAXQUOTAS
);
774 * Quota not supported on this fs? Check this before s_quota_types
775 * since they needn't be set if quota is not supported at all.
779 if (!(sb
->s_quota_types
& (1 << type
)))
782 ret
= check_quotactl_permission(sb
, type
, cmd
, id
);
788 return quota_quotaon(sb
, type
, id
, path
);
790 return quota_quotaoff(sb
, type
);
792 return quota_getfmt(sb
, type
, addr
);
794 return quota_getinfo(sb
, type
, addr
);
796 return quota_setinfo(sb
, type
, addr
);
798 return quota_getquota(sb
, type
, id
, addr
);
800 return quota_getnextquota(sb
, type
, id
, addr
);
802 return quota_setquota(sb
, type
, id
, addr
);
804 if (!sb
->s_qcop
->quota_sync
)
806 return sb
->s_qcop
->quota_sync(sb
, type
);
808 return quota_enable(sb
, addr
);
810 return quota_disable(sb
, addr
);
812 return quota_rmxquota(sb
, addr
);
814 return quota_getxstate(sb
, type
, addr
);
816 return quota_getxstatev(sb
, type
, addr
);
818 return quota_setxquota(sb
, type
, id
, addr
);
820 return quota_getxquota(sb
, type
, id
, addr
);
821 case Q_XGETNEXTQUOTA
:
822 return quota_getnextxquota(sb
, type
, id
, addr
);
826 /* XFS quotas are fully coherent now, making this call a noop */
833 /* Return 1 if 'cmd' will block on frozen filesystem */
834 static int quotactl_cmd_write(int cmd
)
837 * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
838 * as dquot_acquire() may allocate space for new structure and OCFS2
839 * needs to increment on-disk use count.
848 case Q_XGETNEXTQUOTA
:
855 /* Return true if quotactl command is manipulating quota on/off state */
856 static bool quotactl_cmd_onoff(int cmd
)
858 return (cmd
== Q_QUOTAON
) || (cmd
== Q_QUOTAOFF
) ||
859 (cmd
== Q_XQUOTAON
) || (cmd
== Q_XQUOTAOFF
);
863 * look up a superblock on which quota ops will be performed
864 * - use the name of a block device to find the superblock thereon
866 static struct super_block
*quotactl_block(const char __user
*special
, int cmd
)
869 struct super_block
*sb
;
870 struct filename
*tmp
= getname(special
);
871 bool excl
= false, thawed
= false;
876 return ERR_CAST(tmp
);
877 error
= lookup_bdev(tmp
->name
, &dev
);
880 return ERR_PTR(error
);
882 if (quotactl_cmd_onoff(cmd
)) {
885 } else if (quotactl_cmd_write(cmd
)) {
890 sb
= user_get_super(dev
, excl
);
892 return ERR_PTR(-ENODEV
);
893 if (thawed
&& sb
->s_writers
.frozen
!= SB_UNFROZEN
) {
895 up_write(&sb
->s_umount
);
897 up_read(&sb
->s_umount
);
898 /* Wait for sb to unfreeze */
907 return ERR_PTR(-ENODEV
);
912 * This is the system call interface. This communicates with
913 * the user-level programs. Currently this only supports diskquota
914 * calls. Maybe we need to add the process quotas etc. in the future,
915 * but we probably should use rlimits for that.
917 SYSCALL_DEFINE4(quotactl
, unsigned int, cmd
, const char __user
*, special
,
918 qid_t
, id
, void __user
*, addr
)
921 struct super_block
*sb
= NULL
;
922 struct path path
, *pathp
= NULL
;
925 cmds
= cmd
>> SUBCMDSHIFT
;
926 type
= cmd
& SUBCMDMASK
;
928 if (type
>= MAXQUOTAS
)
932 * As a special case Q_SYNC can be called without a specific device.
933 * It will iterate all superblocks that have quota enabled and call
934 * the sync action on each of them.
938 return quota_sync_all(type
);
943 * Path for quotaon has to be resolved before grabbing superblock
944 * because that gets s_umount sem which is also possibly needed by path
945 * resolution (think about autofs) and thus deadlocks could arise.
947 if (cmds
== Q_QUOTAON
) {
948 ret
= user_path_at(AT_FDCWD
, addr
, LOOKUP_FOLLOW
|LOOKUP_AUTOMOUNT
, &path
);
950 pathp
= ERR_PTR(ret
);
955 sb
= quotactl_block(special
, cmds
);
961 ret
= do_quotactl(sb
, type
, cmds
, id
, addr
, pathp
);
963 if (!quotactl_cmd_onoff(cmds
))
966 drop_super_exclusive(sb
);
968 if (pathp
&& !IS_ERR(pathp
))
973 SYSCALL_DEFINE4(quotactl_fd
, unsigned int, fd
, unsigned int, cmd
,
974 qid_t
, id
, void __user
*, addr
)
976 struct super_block
*sb
;
977 unsigned int cmds
= cmd
>> SUBCMDSHIFT
;
978 unsigned int type
= cmd
& SUBCMDMASK
;
979 CLASS(fd_raw
, f
)(fd
);
985 if (type
>= MAXQUOTAS
)
988 if (quotactl_cmd_write(cmds
)) {
989 ret
= mnt_want_write(fd_file(f
)->f_path
.mnt
);
994 sb
= fd_file(f
)->f_path
.mnt
->mnt_sb
;
995 if (quotactl_cmd_onoff(cmds
))
996 down_write(&sb
->s_umount
);
998 down_read(&sb
->s_umount
);
1000 ret
= do_quotactl(sb
, type
, cmds
, id
, addr
, ERR_PTR(-EINVAL
));
1002 if (quotactl_cmd_onoff(cmds
))
1003 up_write(&sb
->s_umount
);
1005 up_read(&sb
->s_umount
);
1007 if (quotactl_cmd_write(cmds
))
1008 mnt_drop_write(fd_file(f
)->f_path
.mnt
);