1 // SPDX-License-Identifier: GPL-2.0
/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel. The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */
10 #include <linux/namei.h>
11 #include <linux/slab.h>
12 #include <asm/current.h>
13 #include <linux/uaccess.h>
14 #include <linux/kernel.h>
15 #include <linux/security.h>
16 #include <linux/syscalls.h>
17 #include <linux/capability.h>
18 #include <linux/quotaops.h>
19 #include <linux/types.h>
20 #include <linux/writeback.h>
22 static int check_quotactl_permission(struct super_block
*sb
, int type
, int cmd
,
26 /* these commands do not require any special privilegues */
34 /* allow to query information for dquots we "own" */
37 if ((type
== USRQUOTA
&& uid_eq(current_euid(), make_kuid(current_user_ns(), id
))) ||
38 (type
== GRPQUOTA
&& in_egroup_p(make_kgid(current_user_ns(), id
))))
42 if (!capable(CAP_SYS_ADMIN
))
46 return security_quotactl(cmd
, type
, id
, sb
);
49 static void quota_sync_one(struct super_block
*sb
, void *arg
)
51 int type
= *(int *)arg
;
53 if (sb
->s_qcop
&& sb
->s_qcop
->quota_sync
&&
54 (sb
->s_quota_types
& (1 << type
)))
55 sb
->s_qcop
->quota_sync(sb
, type
);
58 static int quota_sync_all(int type
)
62 if (type
>= MAXQUOTAS
)
64 ret
= security_quotactl(Q_SYNC
, type
, 0, NULL
);
66 iterate_supers(quota_sync_one
, &type
);
70 unsigned int qtype_enforce_flag(int type
)
74 return FS_QUOTA_UDQ_ENFD
;
76 return FS_QUOTA_GDQ_ENFD
;
78 return FS_QUOTA_PDQ_ENFD
;
83 static int quota_quotaon(struct super_block
*sb
, int type
, qid_t id
,
84 const struct path
*path
)
86 if (!sb
->s_qcop
->quota_on
&& !sb
->s_qcop
->quota_enable
)
88 if (sb
->s_qcop
->quota_enable
)
89 return sb
->s_qcop
->quota_enable(sb
, qtype_enforce_flag(type
));
92 return sb
->s_qcop
->quota_on(sb
, type
, id
, path
);
95 static int quota_quotaoff(struct super_block
*sb
, int type
)
97 if (!sb
->s_qcop
->quota_off
&& !sb
->s_qcop
->quota_disable
)
99 if (sb
->s_qcop
->quota_disable
)
100 return sb
->s_qcop
->quota_disable(sb
, qtype_enforce_flag(type
));
101 return sb
->s_qcop
->quota_off(sb
, type
);
104 static int quota_getfmt(struct super_block
*sb
, int type
, void __user
*addr
)
108 if (!sb_has_quota_active(sb
, type
))
110 fmt
= sb_dqopt(sb
)->info
[type
].dqi_format
->qf_fmt_id
;
111 if (copy_to_user(addr
, &fmt
, sizeof(fmt
)))
116 static int quota_getinfo(struct super_block
*sb
, int type
, void __user
*addr
)
118 struct qc_state state
;
119 struct qc_type_state
*tstate
;
120 struct if_dqinfo uinfo
;
123 /* This checks whether qc_state has enough entries... */
124 BUILD_BUG_ON(MAXQUOTAS
> XQM_MAXQUOTAS
);
125 if (!sb
->s_qcop
->get_state
)
127 ret
= sb
->s_qcop
->get_state(sb
, &state
);
130 tstate
= state
.s_state
+ type
;
131 if (!(tstate
->flags
& QCI_ACCT_ENABLED
))
133 memset(&uinfo
, 0, sizeof(uinfo
));
134 uinfo
.dqi_bgrace
= tstate
->spc_timelimit
;
135 uinfo
.dqi_igrace
= tstate
->ino_timelimit
;
136 if (tstate
->flags
& QCI_SYSFILE
)
137 uinfo
.dqi_flags
|= DQF_SYS_FILE
;
138 if (tstate
->flags
& QCI_ROOT_SQUASH
)
139 uinfo
.dqi_flags
|= DQF_ROOT_SQUASH
;
140 uinfo
.dqi_valid
= IIF_ALL
;
141 if (copy_to_user(addr
, &uinfo
, sizeof(uinfo
)))
146 static int quota_setinfo(struct super_block
*sb
, int type
, void __user
*addr
)
148 struct if_dqinfo info
;
149 struct qc_info qinfo
;
151 if (copy_from_user(&info
, addr
, sizeof(info
)))
153 if (!sb
->s_qcop
->set_info
)
155 if (info
.dqi_valid
& ~(IIF_FLAGS
| IIF_BGRACE
| IIF_IGRACE
))
157 memset(&qinfo
, 0, sizeof(qinfo
));
158 if (info
.dqi_valid
& IIF_FLAGS
) {
159 if (info
.dqi_flags
& ~DQF_SETINFO_MASK
)
161 if (info
.dqi_flags
& DQF_ROOT_SQUASH
)
162 qinfo
.i_flags
|= QCI_ROOT_SQUASH
;
163 qinfo
.i_fieldmask
|= QC_FLAGS
;
165 if (info
.dqi_valid
& IIF_BGRACE
) {
166 qinfo
.i_spc_timelimit
= info
.dqi_bgrace
;
167 qinfo
.i_fieldmask
|= QC_SPC_TIMER
;
169 if (info
.dqi_valid
& IIF_IGRACE
) {
170 qinfo
.i_ino_timelimit
= info
.dqi_igrace
;
171 qinfo
.i_fieldmask
|= QC_INO_TIMER
;
173 return sb
->s_qcop
->set_info(sb
, type
, &qinfo
);
176 static inline qsize_t
qbtos(qsize_t blocks
)
178 return blocks
<< QIF_DQBLKSIZE_BITS
;
181 static inline qsize_t
stoqb(qsize_t space
)
183 return (space
+ QIF_DQBLKSIZE
- 1) >> QIF_DQBLKSIZE_BITS
;
186 static void copy_to_if_dqblk(struct if_dqblk
*dst
, struct qc_dqblk
*src
)
188 memset(dst
, 0, sizeof(*dst
));
189 dst
->dqb_bhardlimit
= stoqb(src
->d_spc_hardlimit
);
190 dst
->dqb_bsoftlimit
= stoqb(src
->d_spc_softlimit
);
191 dst
->dqb_curspace
= src
->d_space
;
192 dst
->dqb_ihardlimit
= src
->d_ino_hardlimit
;
193 dst
->dqb_isoftlimit
= src
->d_ino_softlimit
;
194 dst
->dqb_curinodes
= src
->d_ino_count
;
195 dst
->dqb_btime
= src
->d_spc_timer
;
196 dst
->dqb_itime
= src
->d_ino_timer
;
197 dst
->dqb_valid
= QIF_ALL
;
200 static int quota_getquota(struct super_block
*sb
, int type
, qid_t id
,
208 if (!sb
->s_qcop
->get_dqblk
)
210 qid
= make_kqid(current_user_ns(), type
, id
);
211 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
213 ret
= sb
->s_qcop
->get_dqblk(sb
, qid
, &fdq
);
216 copy_to_if_dqblk(&idq
, &fdq
);
217 if (copy_to_user(addr
, &idq
, sizeof(idq
)))
223 * Return quota for next active quota >= this id, if any exists,
224 * otherwise return -ENOENT via ->get_nextdqblk
226 static int quota_getnextquota(struct super_block
*sb
, int type
, qid_t id
,
231 struct if_nextdqblk idq
;
234 if (!sb
->s_qcop
->get_nextdqblk
)
236 qid
= make_kqid(current_user_ns(), type
, id
);
237 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
239 ret
= sb
->s_qcop
->get_nextdqblk(sb
, &qid
, &fdq
);
242 /* struct if_nextdqblk is a superset of struct if_dqblk */
243 copy_to_if_dqblk((struct if_dqblk
*)&idq
, &fdq
);
244 idq
.dqb_id
= from_kqid(current_user_ns(), qid
);
245 if (copy_to_user(addr
, &idq
, sizeof(idq
)))
250 static void copy_from_if_dqblk(struct qc_dqblk
*dst
, struct if_dqblk
*src
)
252 dst
->d_spc_hardlimit
= qbtos(src
->dqb_bhardlimit
);
253 dst
->d_spc_softlimit
= qbtos(src
->dqb_bsoftlimit
);
254 dst
->d_space
= src
->dqb_curspace
;
255 dst
->d_ino_hardlimit
= src
->dqb_ihardlimit
;
256 dst
->d_ino_softlimit
= src
->dqb_isoftlimit
;
257 dst
->d_ino_count
= src
->dqb_curinodes
;
258 dst
->d_spc_timer
= src
->dqb_btime
;
259 dst
->d_ino_timer
= src
->dqb_itime
;
261 dst
->d_fieldmask
= 0;
262 if (src
->dqb_valid
& QIF_BLIMITS
)
263 dst
->d_fieldmask
|= QC_SPC_SOFT
| QC_SPC_HARD
;
264 if (src
->dqb_valid
& QIF_SPACE
)
265 dst
->d_fieldmask
|= QC_SPACE
;
266 if (src
->dqb_valid
& QIF_ILIMITS
)
267 dst
->d_fieldmask
|= QC_INO_SOFT
| QC_INO_HARD
;
268 if (src
->dqb_valid
& QIF_INODES
)
269 dst
->d_fieldmask
|= QC_INO_COUNT
;
270 if (src
->dqb_valid
& QIF_BTIME
)
271 dst
->d_fieldmask
|= QC_SPC_TIMER
;
272 if (src
->dqb_valid
& QIF_ITIME
)
273 dst
->d_fieldmask
|= QC_INO_TIMER
;
276 static int quota_setquota(struct super_block
*sb
, int type
, qid_t id
,
283 if (copy_from_user(&idq
, addr
, sizeof(idq
)))
285 if (!sb
->s_qcop
->set_dqblk
)
287 qid
= make_kqid(current_user_ns(), type
, id
);
288 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
290 copy_from_if_dqblk(&fdq
, &idq
);
291 return sb
->s_qcop
->set_dqblk(sb
, qid
, &fdq
);
294 static int quota_enable(struct super_block
*sb
, void __user
*addr
)
298 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
300 if (!sb
->s_qcop
->quota_enable
)
302 return sb
->s_qcop
->quota_enable(sb
, flags
);
305 static int quota_disable(struct super_block
*sb
, void __user
*addr
)
309 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
311 if (!sb
->s_qcop
->quota_disable
)
313 return sb
->s_qcop
->quota_disable(sb
, flags
);
316 static int quota_state_to_flags(struct qc_state
*state
)
320 if (state
->s_state
[USRQUOTA
].flags
& QCI_ACCT_ENABLED
)
321 flags
|= FS_QUOTA_UDQ_ACCT
;
322 if (state
->s_state
[USRQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
323 flags
|= FS_QUOTA_UDQ_ENFD
;
324 if (state
->s_state
[GRPQUOTA
].flags
& QCI_ACCT_ENABLED
)
325 flags
|= FS_QUOTA_GDQ_ACCT
;
326 if (state
->s_state
[GRPQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
327 flags
|= FS_QUOTA_GDQ_ENFD
;
328 if (state
->s_state
[PRJQUOTA
].flags
& QCI_ACCT_ENABLED
)
329 flags
|= FS_QUOTA_PDQ_ACCT
;
330 if (state
->s_state
[PRJQUOTA
].flags
& QCI_LIMITS_ENFORCED
)
331 flags
|= FS_QUOTA_PDQ_ENFD
;
335 static int quota_getstate(struct super_block
*sb
, struct fs_quota_stat
*fqs
)
338 struct qc_state state
;
341 memset(&state
, 0, sizeof (struct qc_state
));
342 ret
= sb
->s_qcop
->get_state(sb
, &state
);
346 memset(fqs
, 0, sizeof(*fqs
));
347 fqs
->qs_version
= FS_QSTAT_VERSION
;
348 fqs
->qs_flags
= quota_state_to_flags(&state
);
349 /* No quota enabled? */
352 fqs
->qs_incoredqs
= state
.s_incoredqs
;
354 * GETXSTATE quotactl has space for just one set of time limits so
355 * report them for the first enabled quota type
357 for (type
= 0; type
< XQM_MAXQUOTAS
; type
++)
358 if (state
.s_state
[type
].flags
& QCI_ACCT_ENABLED
)
360 BUG_ON(type
== XQM_MAXQUOTAS
);
361 fqs
->qs_btimelimit
= state
.s_state
[type
].spc_timelimit
;
362 fqs
->qs_itimelimit
= state
.s_state
[type
].ino_timelimit
;
363 fqs
->qs_rtbtimelimit
= state
.s_state
[type
].rt_spc_timelimit
;
364 fqs
->qs_bwarnlimit
= state
.s_state
[type
].spc_warnlimit
;
365 fqs
->qs_iwarnlimit
= state
.s_state
[type
].ino_warnlimit
;
367 /* Inodes may be allocated even if inactive; copy out if present */
368 if (state
.s_state
[USRQUOTA
].ino
) {
369 fqs
->qs_uquota
.qfs_ino
= state
.s_state
[USRQUOTA
].ino
;
370 fqs
->qs_uquota
.qfs_nblks
= state
.s_state
[USRQUOTA
].blocks
;
371 fqs
->qs_uquota
.qfs_nextents
= state
.s_state
[USRQUOTA
].nextents
;
373 if (state
.s_state
[GRPQUOTA
].ino
) {
374 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[GRPQUOTA
].ino
;
375 fqs
->qs_gquota
.qfs_nblks
= state
.s_state
[GRPQUOTA
].blocks
;
376 fqs
->qs_gquota
.qfs_nextents
= state
.s_state
[GRPQUOTA
].nextents
;
378 if (state
.s_state
[PRJQUOTA
].ino
) {
380 * Q_XGETQSTAT doesn't have room for both group and project
381 * quotas. So, allow the project quota values to be copied out
382 * only if there is no group quota information available.
384 if (!(state
.s_state
[GRPQUOTA
].flags
& QCI_ACCT_ENABLED
)) {
385 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[PRJQUOTA
].ino
;
386 fqs
->qs_gquota
.qfs_nblks
=
387 state
.s_state
[PRJQUOTA
].blocks
;
388 fqs
->qs_gquota
.qfs_nextents
=
389 state
.s_state
[PRJQUOTA
].nextents
;
395 static int quota_getxstate(struct super_block
*sb
, void __user
*addr
)
397 struct fs_quota_stat fqs
;
400 if (!sb
->s_qcop
->get_state
)
402 ret
= quota_getstate(sb
, &fqs
);
403 if (!ret
&& copy_to_user(addr
, &fqs
, sizeof(fqs
)))
408 static int quota_getstatev(struct super_block
*sb
, struct fs_quota_statv
*fqs
)
411 struct qc_state state
;
414 memset(&state
, 0, sizeof (struct qc_state
));
415 ret
= sb
->s_qcop
->get_state(sb
, &state
);
419 memset(fqs
, 0, sizeof(*fqs
));
420 fqs
->qs_version
= FS_QSTAT_VERSION
;
421 fqs
->qs_flags
= quota_state_to_flags(&state
);
422 /* No quota enabled? */
425 fqs
->qs_incoredqs
= state
.s_incoredqs
;
427 * GETXSTATV quotactl has space for just one set of time limits so
428 * report them for the first enabled quota type
430 for (type
= 0; type
< XQM_MAXQUOTAS
; type
++)
431 if (state
.s_state
[type
].flags
& QCI_ACCT_ENABLED
)
433 BUG_ON(type
== XQM_MAXQUOTAS
);
434 fqs
->qs_btimelimit
= state
.s_state
[type
].spc_timelimit
;
435 fqs
->qs_itimelimit
= state
.s_state
[type
].ino_timelimit
;
436 fqs
->qs_rtbtimelimit
= state
.s_state
[type
].rt_spc_timelimit
;
437 fqs
->qs_bwarnlimit
= state
.s_state
[type
].spc_warnlimit
;
438 fqs
->qs_iwarnlimit
= state
.s_state
[type
].ino_warnlimit
;
440 /* Inodes may be allocated even if inactive; copy out if present */
441 if (state
.s_state
[USRQUOTA
].ino
) {
442 fqs
->qs_uquota
.qfs_ino
= state
.s_state
[USRQUOTA
].ino
;
443 fqs
->qs_uquota
.qfs_nblks
= state
.s_state
[USRQUOTA
].blocks
;
444 fqs
->qs_uquota
.qfs_nextents
= state
.s_state
[USRQUOTA
].nextents
;
446 if (state
.s_state
[GRPQUOTA
].ino
) {
447 fqs
->qs_gquota
.qfs_ino
= state
.s_state
[GRPQUOTA
].ino
;
448 fqs
->qs_gquota
.qfs_nblks
= state
.s_state
[GRPQUOTA
].blocks
;
449 fqs
->qs_gquota
.qfs_nextents
= state
.s_state
[GRPQUOTA
].nextents
;
451 if (state
.s_state
[PRJQUOTA
].ino
) {
452 fqs
->qs_pquota
.qfs_ino
= state
.s_state
[PRJQUOTA
].ino
;
453 fqs
->qs_pquota
.qfs_nblks
= state
.s_state
[PRJQUOTA
].blocks
;
454 fqs
->qs_pquota
.qfs_nextents
= state
.s_state
[PRJQUOTA
].nextents
;
459 static int quota_getxstatev(struct super_block
*sb
, void __user
*addr
)
461 struct fs_quota_statv fqs
;
464 if (!sb
->s_qcop
->get_state
)
467 memset(&fqs
, 0, sizeof(fqs
));
468 if (copy_from_user(&fqs
, addr
, 1)) /* Just read qs_version */
471 /* If this kernel doesn't support user specified version, fail */
472 switch (fqs
.qs_version
) {
473 case FS_QSTATV_VERSION1
:
478 ret
= quota_getstatev(sb
, &fqs
);
479 if (!ret
&& copy_to_user(addr
, &fqs
, sizeof(fqs
)))
485 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
486 * out of there as xfsprogs rely on definitions being in that header file. So
487 * just define same functions here for quota purposes.
489 #define XFS_BB_SHIFT 9
491 static inline u64
quota_bbtob(u64 blocks
)
493 return blocks
<< XFS_BB_SHIFT
;
496 static inline u64
quota_btobb(u64 bytes
)
498 return (bytes
+ (1 << XFS_BB_SHIFT
) - 1) >> XFS_BB_SHIFT
;
501 static void copy_from_xfs_dqblk(struct qc_dqblk
*dst
, struct fs_disk_quota
*src
)
503 dst
->d_spc_hardlimit
= quota_bbtob(src
->d_blk_hardlimit
);
504 dst
->d_spc_softlimit
= quota_bbtob(src
->d_blk_softlimit
);
505 dst
->d_ino_hardlimit
= src
->d_ino_hardlimit
;
506 dst
->d_ino_softlimit
= src
->d_ino_softlimit
;
507 dst
->d_space
= quota_bbtob(src
->d_bcount
);
508 dst
->d_ino_count
= src
->d_icount
;
509 dst
->d_ino_timer
= src
->d_itimer
;
510 dst
->d_spc_timer
= src
->d_btimer
;
511 dst
->d_ino_warns
= src
->d_iwarns
;
512 dst
->d_spc_warns
= src
->d_bwarns
;
513 dst
->d_rt_spc_hardlimit
= quota_bbtob(src
->d_rtb_hardlimit
);
514 dst
->d_rt_spc_softlimit
= quota_bbtob(src
->d_rtb_softlimit
);
515 dst
->d_rt_space
= quota_bbtob(src
->d_rtbcount
);
516 dst
->d_rt_spc_timer
= src
->d_rtbtimer
;
517 dst
->d_rt_spc_warns
= src
->d_rtbwarns
;
518 dst
->d_fieldmask
= 0;
519 if (src
->d_fieldmask
& FS_DQ_ISOFT
)
520 dst
->d_fieldmask
|= QC_INO_SOFT
;
521 if (src
->d_fieldmask
& FS_DQ_IHARD
)
522 dst
->d_fieldmask
|= QC_INO_HARD
;
523 if (src
->d_fieldmask
& FS_DQ_BSOFT
)
524 dst
->d_fieldmask
|= QC_SPC_SOFT
;
525 if (src
->d_fieldmask
& FS_DQ_BHARD
)
526 dst
->d_fieldmask
|= QC_SPC_HARD
;
527 if (src
->d_fieldmask
& FS_DQ_RTBSOFT
)
528 dst
->d_fieldmask
|= QC_RT_SPC_SOFT
;
529 if (src
->d_fieldmask
& FS_DQ_RTBHARD
)
530 dst
->d_fieldmask
|= QC_RT_SPC_HARD
;
531 if (src
->d_fieldmask
& FS_DQ_BTIMER
)
532 dst
->d_fieldmask
|= QC_SPC_TIMER
;
533 if (src
->d_fieldmask
& FS_DQ_ITIMER
)
534 dst
->d_fieldmask
|= QC_INO_TIMER
;
535 if (src
->d_fieldmask
& FS_DQ_RTBTIMER
)
536 dst
->d_fieldmask
|= QC_RT_SPC_TIMER
;
537 if (src
->d_fieldmask
& FS_DQ_BWARNS
)
538 dst
->d_fieldmask
|= QC_SPC_WARNS
;
539 if (src
->d_fieldmask
& FS_DQ_IWARNS
)
540 dst
->d_fieldmask
|= QC_INO_WARNS
;
541 if (src
->d_fieldmask
& FS_DQ_RTBWARNS
)
542 dst
->d_fieldmask
|= QC_RT_SPC_WARNS
;
543 if (src
->d_fieldmask
& FS_DQ_BCOUNT
)
544 dst
->d_fieldmask
|= QC_SPACE
;
545 if (src
->d_fieldmask
& FS_DQ_ICOUNT
)
546 dst
->d_fieldmask
|= QC_INO_COUNT
;
547 if (src
->d_fieldmask
& FS_DQ_RTBCOUNT
)
548 dst
->d_fieldmask
|= QC_RT_SPACE
;
551 static void copy_qcinfo_from_xfs_dqblk(struct qc_info
*dst
,
552 struct fs_disk_quota
*src
)
554 memset(dst
, 0, sizeof(*dst
));
555 dst
->i_spc_timelimit
= src
->d_btimer
;
556 dst
->i_ino_timelimit
= src
->d_itimer
;
557 dst
->i_rt_spc_timelimit
= src
->d_rtbtimer
;
558 dst
->i_ino_warnlimit
= src
->d_iwarns
;
559 dst
->i_spc_warnlimit
= src
->d_bwarns
;
560 dst
->i_rt_spc_warnlimit
= src
->d_rtbwarns
;
561 if (src
->d_fieldmask
& FS_DQ_BWARNS
)
562 dst
->i_fieldmask
|= QC_SPC_WARNS
;
563 if (src
->d_fieldmask
& FS_DQ_IWARNS
)
564 dst
->i_fieldmask
|= QC_INO_WARNS
;
565 if (src
->d_fieldmask
& FS_DQ_RTBWARNS
)
566 dst
->i_fieldmask
|= QC_RT_SPC_WARNS
;
567 if (src
->d_fieldmask
& FS_DQ_BTIMER
)
568 dst
->i_fieldmask
|= QC_SPC_TIMER
;
569 if (src
->d_fieldmask
& FS_DQ_ITIMER
)
570 dst
->i_fieldmask
|= QC_INO_TIMER
;
571 if (src
->d_fieldmask
& FS_DQ_RTBTIMER
)
572 dst
->i_fieldmask
|= QC_RT_SPC_TIMER
;
575 static int quota_setxquota(struct super_block
*sb
, int type
, qid_t id
,
578 struct fs_disk_quota fdq
;
582 if (copy_from_user(&fdq
, addr
, sizeof(fdq
)))
584 if (!sb
->s_qcop
->set_dqblk
)
586 qid
= make_kqid(current_user_ns(), type
, id
);
587 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
589 /* Are we actually setting timer / warning limits for all users? */
590 if (from_kqid(sb
->s_user_ns
, qid
) == 0 &&
591 fdq
.d_fieldmask
& (FS_DQ_WARNS_MASK
| FS_DQ_TIMER_MASK
)) {
592 struct qc_info qinfo
;
595 if (!sb
->s_qcop
->set_info
)
597 copy_qcinfo_from_xfs_dqblk(&qinfo
, &fdq
);
598 ret
= sb
->s_qcop
->set_info(sb
, type
, &qinfo
);
601 /* These are already done */
602 fdq
.d_fieldmask
&= ~(FS_DQ_WARNS_MASK
| FS_DQ_TIMER_MASK
);
604 copy_from_xfs_dqblk(&qdq
, &fdq
);
605 return sb
->s_qcop
->set_dqblk(sb
, qid
, &qdq
);
608 static void copy_to_xfs_dqblk(struct fs_disk_quota
*dst
, struct qc_dqblk
*src
,
611 memset(dst
, 0, sizeof(*dst
));
612 dst
->d_version
= FS_DQUOT_VERSION
;
614 if (type
== USRQUOTA
)
615 dst
->d_flags
= FS_USER_QUOTA
;
616 else if (type
== PRJQUOTA
)
617 dst
->d_flags
= FS_PROJ_QUOTA
;
619 dst
->d_flags
= FS_GROUP_QUOTA
;
620 dst
->d_blk_hardlimit
= quota_btobb(src
->d_spc_hardlimit
);
621 dst
->d_blk_softlimit
= quota_btobb(src
->d_spc_softlimit
);
622 dst
->d_ino_hardlimit
= src
->d_ino_hardlimit
;
623 dst
->d_ino_softlimit
= src
->d_ino_softlimit
;
624 dst
->d_bcount
= quota_btobb(src
->d_space
);
625 dst
->d_icount
= src
->d_ino_count
;
626 dst
->d_itimer
= src
->d_ino_timer
;
627 dst
->d_btimer
= src
->d_spc_timer
;
628 dst
->d_iwarns
= src
->d_ino_warns
;
629 dst
->d_bwarns
= src
->d_spc_warns
;
630 dst
->d_rtb_hardlimit
= quota_btobb(src
->d_rt_spc_hardlimit
);
631 dst
->d_rtb_softlimit
= quota_btobb(src
->d_rt_spc_softlimit
);
632 dst
->d_rtbcount
= quota_btobb(src
->d_rt_space
);
633 dst
->d_rtbtimer
= src
->d_rt_spc_timer
;
634 dst
->d_rtbwarns
= src
->d_rt_spc_warns
;
637 static int quota_getxquota(struct super_block
*sb
, int type
, qid_t id
,
640 struct fs_disk_quota fdq
;
645 if (!sb
->s_qcop
->get_dqblk
)
647 qid
= make_kqid(current_user_ns(), type
, id
);
648 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
650 ret
= sb
->s_qcop
->get_dqblk(sb
, qid
, &qdq
);
653 copy_to_xfs_dqblk(&fdq
, &qdq
, type
, id
);
654 if (copy_to_user(addr
, &fdq
, sizeof(fdq
)))
660 * Return quota for next active quota >= this id, if any exists,
661 * otherwise return -ENOENT via ->get_nextdqblk.
663 static int quota_getnextxquota(struct super_block
*sb
, int type
, qid_t id
,
666 struct fs_disk_quota fdq
;
672 if (!sb
->s_qcop
->get_nextdqblk
)
674 qid
= make_kqid(current_user_ns(), type
, id
);
675 if (!qid_has_mapping(sb
->s_user_ns
, qid
))
677 ret
= sb
->s_qcop
->get_nextdqblk(sb
, &qid
, &qdq
);
680 id_out
= from_kqid(current_user_ns(), qid
);
681 copy_to_xfs_dqblk(&fdq
, &qdq
, type
, id_out
);
682 if (copy_to_user(addr
, &fdq
, sizeof(fdq
)))
687 static int quota_rmxquota(struct super_block
*sb
, void __user
*addr
)
691 if (copy_from_user(&flags
, addr
, sizeof(flags
)))
693 if (!sb
->s_qcop
->rm_xquota
)
695 return sb
->s_qcop
->rm_xquota(sb
, flags
);
698 /* Copy parameters and call proper function */
699 static int do_quotactl(struct super_block
*sb
, int type
, int cmd
, qid_t id
,
700 void __user
*addr
, const struct path
*path
)
704 if (type
>= (XQM_COMMAND(cmd
) ? XQM_MAXQUOTAS
: MAXQUOTAS
))
707 * Quota not supported on this fs? Check this before s_quota_types
708 * since they needn't be set if quota is not supported at all.
712 if (!(sb
->s_quota_types
& (1 << type
)))
715 ret
= check_quotactl_permission(sb
, type
, cmd
, id
);
721 return quota_quotaon(sb
, type
, id
, path
);
723 return quota_quotaoff(sb
, type
);
725 return quota_getfmt(sb
, type
, addr
);
727 return quota_getinfo(sb
, type
, addr
);
729 return quota_setinfo(sb
, type
, addr
);
731 return quota_getquota(sb
, type
, id
, addr
);
733 return quota_getnextquota(sb
, type
, id
, addr
);
735 return quota_setquota(sb
, type
, id
, addr
);
737 if (!sb
->s_qcop
->quota_sync
)
739 return sb
->s_qcop
->quota_sync(sb
, type
);
741 return quota_enable(sb
, addr
);
743 return quota_disable(sb
, addr
);
745 return quota_rmxquota(sb
, addr
);
747 return quota_getxstate(sb
, addr
);
749 return quota_getxstatev(sb
, addr
);
751 return quota_setxquota(sb
, type
, id
, addr
);
753 return quota_getxquota(sb
, type
, id
, addr
);
754 case Q_XGETNEXTQUOTA
:
755 return quota_getnextxquota(sb
, type
, id
, addr
);
759 /* XFS quotas are fully coherent now, making this call a noop */
#ifdef CONFIG_BLOCK

/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
	/*
	 * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
	 * as dquot_acquire() may allocate space for new structure and OCFS2
	 * needs to increment on-disk use count.
	 */
	switch (cmd) {
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XGETQUOTA:
	case Q_XGETNEXTQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	}
	return 1;
}
#endif /* CONFIG_BLOCK */
791 /* Return true if quotactl command is manipulating quota on/off state */
792 static bool quotactl_cmd_onoff(int cmd
)
794 return (cmd
== Q_QUOTAON
) || (cmd
== Q_QUOTAOFF
);
798 * look up a superblock on which quota ops will be performed
799 * - use the name of a block device to find the superblock thereon
801 static struct super_block
*quotactl_block(const char __user
*special
, int cmd
)
804 struct block_device
*bdev
;
805 struct super_block
*sb
;
806 struct filename
*tmp
= getname(special
);
809 return ERR_CAST(tmp
);
810 bdev
= lookup_bdev(tmp
->name
);
813 return ERR_CAST(bdev
);
814 if (quotactl_cmd_onoff(cmd
))
815 sb
= get_super_exclusive_thawed(bdev
);
816 else if (quotactl_cmd_write(cmd
))
817 sb
= get_super_thawed(bdev
);
819 sb
= get_super(bdev
);
822 return ERR_PTR(-ENODEV
);
826 return ERR_PTR(-ENODEV
);
831 * This is the system call interface. This communicates with
832 * the user-level programs. Currently this only supports diskquota
833 * calls. Maybe we need to add the process quotas etc. in the future,
834 * but we probably should use rlimits for that.
836 SYSCALL_DEFINE4(quotactl
, unsigned int, cmd
, const char __user
*, special
,
837 qid_t
, id
, void __user
*, addr
)
840 struct super_block
*sb
= NULL
;
841 struct path path
, *pathp
= NULL
;
844 cmds
= cmd
>> SUBCMDSHIFT
;
845 type
= cmd
& SUBCMDMASK
;
848 * As a special case Q_SYNC can be called without a specific device.
849 * It will iterate all superblocks that have quota enabled and call
850 * the sync action on each of them.
854 return quota_sync_all(type
);
859 * Path for quotaon has to be resolved before grabbing superblock
860 * because that gets s_umount sem which is also possibly needed by path
861 * resolution (think about autofs) and thus deadlocks could arise.
863 if (cmds
== Q_QUOTAON
) {
864 ret
= user_path_at(AT_FDCWD
, addr
, LOOKUP_FOLLOW
|LOOKUP_AUTOMOUNT
, &path
);
866 pathp
= ERR_PTR(ret
);
871 sb
= quotactl_block(special
, cmds
);
877 ret
= do_quotactl(sb
, type
, cmds
, id
, addr
, pathp
);
879 if (!quotactl_cmd_onoff(cmds
))
882 drop_super_exclusive(sb
);
884 if (pathp
&& !IS_ERR(pathp
))