fs/xfs/xfs_qm_syscalls.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_icache.h"

STATIC int
xfs_qm_log_quotaoff(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	**qoffstartp,
	uint			flags)
{
	struct xfs_trans	*tp;
	int			error;
	struct xfs_qoff_logitem	*qoffi;

	*qoffstartp = NULL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
	if (error)
		goto out;

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);
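
	/*
	 * Stage the new on-disk quota flags under the superblock lock and log
	 * the superblock in the same transaction as the quotaoff item, so the
	 * flag change and the QUOTAOFF record reach the log together.
	 */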
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_log_sb(tp);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	*qoffstartp = qoffi;
out:
	return error;
}

STATIC int
xfs_qm_log_quotaoff_end(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	*startqoff,
	uint			flags)
{
	struct xfs_trans	*tp;
	int			error;
	struct xfs_qoff_logitem	*qoffi;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
	if (error)
		return error;

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
	xfs_mount_t		*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	uint			dqtype;
	int			error;
	uint			inactivate_flags;
	struct xfs_qoff_logitem	*qoffstart;

	/*
	 * No file system can have quotas enabled on disk but not in core.
	 * Note that quota utilities (like quotaoff) _expect_
	 * errno == -EEXIST here.
	 */
	if ((mp->m_qflags & flags) == 0)
		return -EEXIST;
	error = 0;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

	/*
	 * We don't want to deal with two quotaoffs messing up each other,
	 * so we're going to serialize it. quotaoff isn't exactly a performance
	 * critical thing.
	 * If quotaoff, then we must be dealing with the root filesystem.
	 */
	ASSERT(q);
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * If we're just turning off quota enforcement, change mp and go.
	 */
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&q->qi_quotaofflock);

		/* XXX what to do if error ? Revert back to old vals incore ? */
		return xfs_sync_sb(mp, false);
	}

	dqtype = 0;
	inactivate_flags = 0;
	/*
	 * If accounting is off, we must turn enforcement off, clear the
	 * quota 'CHKD' certificate to make it known that we have to
	 * do a quotacheck the next time this quota is turned on.
	 */
	if (flags & XFS_UQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_UQUOTA;
		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
		inactivate_flags |= XFS_UQUOTA_ACTIVE;
	}
	if (flags & XFS_GQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_GQUOTA;
		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
		inactivate_flags |= XFS_GQUOTA_ACTIVE;
	}
	if (flags & XFS_PQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_PQUOTA;
		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
		inactivate_flags |= XFS_PQUOTA_ACTIVE;
	}

	/*
	 * Nothing to do? Don't complain. This happens when we're just
	 * turning off quota enforcement.
	 */
	if ((mp->m_qflags & flags) == 0)
		goto out_unlock;

	/*
	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
	 * and synchronously. If we fail to write, we should abort the
	 * operation as it cannot be recovered safely if we crash.
	 */
	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
	if (error)
		goto out_unlock;

	/*
	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
	 * to take care of the race between dqget and quotaoff. We don't take
	 * any special locks to reset these bits. All processes need to check
	 * these bits *after* taking inode lock(s) to see if the particular
	 * quota type is in the process of being turned off. If *ACTIVE, it is
	 * guaranteed that all dquot structures and all quotainode ptrs will all
	 * stay valid as long as that inode is kept locked.
	 *
	 * There is no turning back after this.
	 */
	mp->m_qflags &= ~inactivate_flags;

	/*
	 * Give back all the dquot reference(s) held by inodes.
	 * Here we go thru every single incore inode in this file system, and
	 * do a dqrele on the i_udquot/i_gdquot that it may have.
	 * Essentially, as long as somebody has an inode locked, this guarantees
	 * that quotas will not be turned off. This is handy because in a
	 * transaction once we lock the inode(s) and check for quotaon, we can
	 * depend on the quota inodes (and other things) being valid as long as
	 * we keep the lock(s).
	 */
	xfs_qm_dqrele_all_inodes(mp, flags);

	/*
	 * Next we make the changes in the quota flag in the mount struct.
	 * This isn't protected by a particular lock directly, because we
	 * don't want to take a mrlock every time we depend on quotas being on.
	 */
	mp->m_qflags &= ~flags;

	/*
	 * Go through all the dquots of this file system and purge them,
	 * according to what was turned off.
	 */
	xfs_qm_dqpurge_all(mp, dqtype);

	/*
	 * Transactions that had started before ACTIVE state bit was cleared
	 * could have logged many dquots, so they'd have higher LSNs than
	 * the first QUOTAOFF log record does. If we happen to crash when
	 * the tail of the log has gone past the QUOTAOFF record, but
	 * before the last dquot modification, those dquots __will__
	 * recover, and that's not good.
	 *
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		goto out_unlock;
	}

	/*
	 * If all quotas are completely turned off, close shop.
	 */
	if (mp->m_qflags == 0) {
		mutex_unlock(&q->qi_quotaofflock);
		xfs_qm_destroy_quotainfo(mp);
		return 0;
	}

	/*
	 * Release our quotainode references if we don't need them anymore.
	 */
	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
		xfs_irele(q->qi_uquotaip);
		q->qi_uquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
		xfs_irele(q->qi_gquotaip);
		q->qi_gquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
		xfs_irele(q->qi_pquotaip);
		q->qi_pquotaip = NULL;
	}

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;
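
	/*
	 * Lock out I/O on the quota inode before reserving the truncate
	 * transaction; the ILOCK is only taken after the reservation, per
	 * the usual XFS locking order.
	 */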
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	xfs_irele(ip);
	return error;
}

int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = -EINVAL;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
	    (flags & ~XFS_DQ_ALLTYPES)) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
			__func__, flags, mp->m_qflags);
		return -EINVAL;
	}
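
	/*
	 * Truncate each requested quota inode in turn, stopping at the first
	 * error.
	 */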
	if (flags & XFS_DQ_USER) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_GROUP) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_PROJ)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

	return error;
}

/*
 * Switch on (a given) quota enforcement for a filesystem. This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
			__func__, mp->m_qflags);
		return -EINVAL;
	}

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota acct on ondisk without m_qflags' knowing.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	     (flags & XFS_UQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	     (flags & XFS_GQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	     (flags & XFS_PQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x",
			__func__, flags, mp->m_sb.sb_qflags);
		return -EINVAL;
	}
	/*
	 * If everything's up to date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return -EEXIST;

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags)
		return -EEXIST;

	error = xfs_sync_sb(mp, false);
	if (error)
		return error;
	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
		return 0;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ESRCH;

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return 0;
}

#define XFS_QC_MASK \
	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	struct xfs_def_quota	*defq;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_QC_MASK)
		return -EINVAL;
	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We hold
	 * a reference to the dquot, so it's safe to do this unlock/lock without
	 * it being reclaimed in the mean time.
	 */
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_unlock;
	}

	defq = xfs_get_defquota(dqp, q);
	xfs_dqunlock(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
	if (error)
		goto out_rele;

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			defq->bhardlimit = hard;
			defq->bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
	}
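	/*
	 * Realtime block limits: same hard >= soft rule as above, converted
	 * from bytes to filesystem blocks.
	 */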
	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->rtbhardlimit = hard;
			defq->rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
	}
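
	/*
	 * Inode count limits: taken as raw counts, no unit conversion needed.
	 */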
	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->ihardlimit = hard;
			defq->isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & QC_SPC_WARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
	if (newlim->d_fieldmask & QC_INO_WARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used. Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & QC_SPC_TIMER) {
			q->qi_btimelimit = newlim->d_spc_timer;
			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
		}
		if (newlim->d_fieldmask & QC_INO_TIMER) {
			q->qi_itimelimit = newlim->d_ino_timer;
			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
		}
		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
		}
		if (newlim->d_fieldmask & QC_SPC_WARNS)
			q->qi_bwarnlimit = newlim->d_spc_warns;
		if (newlim->d_fieldmask & QC_INO_WARNS)
			q->qi_iwarnlimit = newlim->d_ino_warns;
		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
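	/*
	 * Mark the dquot dirty and log it so the new limits make it to the
	 * on-disk dquot when the transaction commits.
	 */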
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
	struct xfs_mount	*mp,
	uint			type,
	const struct xfs_dquot	*dqp,
	struct qc_dqblk		*dst)
{
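	/*
	 * Translate the on-disk dquot (big-endian, space in filesystem
	 * blocks) into the generic qc_dqblk (cpu-endian, space in bytes).
	 */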
	memset(dst, 0, sizeof(*dst));
	dst->d_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
	dst->d_ino_count = dqp->q_res_icount;
	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rt_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rt_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
		dst->d_spc_timer = 0;
		dst->d_ino_timer = 0;
		dst->d_rt_spc_timer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
	    dqp->q_core.d_id != 0) {
		if ((dst->d_space > dst->d_spc_softlimit) &&
		    (dst->d_spc_softlimit > 0)) {
			ASSERT(dst->d_spc_timer != 0);
		}
		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_ino_timer != 0);
		}
	}
#endif
}

/* Return the quota information for the dquot matching id. */
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so don't
	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, id, type, false, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = -ENOENT;
		goto out_put;
	}

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

out_put:
	xfs_qm_dqput(dqp);
	return error;
}

/*
 * Return the quota information for the first initialized dquot whose id
 * is at least as high as id.
 */
int
xfs_qm_scall_getquota_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		*id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
	if (error)
		return error;

	/* Fill in the ID we actually read from disk */
	*id = be32_to_cpu(dqp->q_core.d_id);

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

	xfs_qm_dqput(dqp);
	return error;
}

STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_pdquot == NULL);
		return 0;
	}
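
	/*
	 * Drop the inode's dquot reference(s) for each quota type being
	 * turned off, under the inode lock.
	 */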
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount	*mp,
	uint			flags)
{
	ASSERT(mp->m_quotainfo);
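	/*
	 * XFS_AGITER_INEW_WAIT makes the iterator wait for inodes that are
	 * still being set up (XFS_INEW) rather than skip them, so dquot
	 * references on newly created inodes are not missed.
	 */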
	xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
				    XFS_AGITER_INEW_WAIT);
}