1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
7 #include <linux/capability.h>
11 #include "xfs_shared.h"
12 #include "xfs_format.h"
13 #include "xfs_log_format.h"
14 #include "xfs_trans_resv.h"
17 #include "xfs_mount.h"
18 #include "xfs_inode.h"
19 #include "xfs_trans.h"
20 #include "xfs_error.h"
21 #include "xfs_quota.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_defer.h"
27 STATIC
int xfs_qm_log_quotaoff(xfs_mount_t
*, xfs_qoff_logitem_t
**, uint
);
28 STATIC
int xfs_qm_log_quotaoff_end(xfs_mount_t
*, xfs_qoff_logitem_t
*,
32 * Turn off quota accounting and/or enforcement for all udquots and/or
33 * gdquots. Called only at unmount time.
35 * This assumes that there are no dquots of this file system cached
36 * incore, and modifies the ondisk dquot directly. Therefore, for example,
37 * it is an error to call this twice, without purging the cache.
40 xfs_qm_scall_quotaoff(
44 struct xfs_quotainfo
*q
= mp
->m_quotainfo
;
47 uint inactivate_flags
;
48 xfs_qoff_logitem_t
*qoffstart
;
51 * No file system can have quotas enabled on disk but not in core.
52 * Note that quota utilities (like quotaoff) _expect_
53 * errno == -EEXIST here.
55 if ((mp
->m_qflags
& flags
) == 0)
59 flags
&= (XFS_ALL_QUOTA_ACCT
| XFS_ALL_QUOTA_ENFD
);
62 * We don't want to deal with two quotaoffs messing up each other,
63 * so we're going to serialize it. quotaoff isn't exactly a performance
65 * If quotaoff, then we must be dealing with the root filesystem.
68 mutex_lock(&q
->qi_quotaofflock
);
71 * If we're just turning off quota enforcement, change mp and go.
73 if ((flags
& XFS_ALL_QUOTA_ACCT
) == 0) {
74 mp
->m_qflags
&= ~(flags
);
76 spin_lock(&mp
->m_sb_lock
);
77 mp
->m_sb
.sb_qflags
= mp
->m_qflags
;
78 spin_unlock(&mp
->m_sb_lock
);
79 mutex_unlock(&q
->qi_quotaofflock
);
81 /* XXX what to do if error ? Revert back to old vals incore ? */
82 return xfs_sync_sb(mp
, false);
88 * If accounting is off, we must turn enforcement off, clear the
89 * quota 'CHKD' certificate to make it known that we have to
90 * do a quotacheck the next time this quota is turned on.
92 if (flags
& XFS_UQUOTA_ACCT
) {
93 dqtype
|= XFS_QMOPT_UQUOTA
;
94 flags
|= (XFS_UQUOTA_CHKD
| XFS_UQUOTA_ENFD
);
95 inactivate_flags
|= XFS_UQUOTA_ACTIVE
;
97 if (flags
& XFS_GQUOTA_ACCT
) {
98 dqtype
|= XFS_QMOPT_GQUOTA
;
99 flags
|= (XFS_GQUOTA_CHKD
| XFS_GQUOTA_ENFD
);
100 inactivate_flags
|= XFS_GQUOTA_ACTIVE
;
102 if (flags
& XFS_PQUOTA_ACCT
) {
103 dqtype
|= XFS_QMOPT_PQUOTA
;
104 flags
|= (XFS_PQUOTA_CHKD
| XFS_PQUOTA_ENFD
);
105 inactivate_flags
|= XFS_PQUOTA_ACTIVE
;
109 * Nothing to do? Don't complain. This happens when we're just
110 * turning off quota enforcement.
112 if ((mp
->m_qflags
& flags
) == 0)
116 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
117 * and synchronously. If we fail to write, we should abort the
118 * operation as it cannot be recovered safely if we crash.
120 error
= xfs_qm_log_quotaoff(mp
, &qoffstart
, flags
);
125 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
126 * to take care of the race between dqget and quotaoff. We don't take
127 * any special locks to reset these bits. All processes need to check
128 * these bits *after* taking inode lock(s) to see if the particular
129 * quota type is in the process of being turned off. If *ACTIVE, it is
130 * guaranteed that all dquot structures and all quotainode ptrs will all
131 * stay valid as long as that inode is kept locked.
133 * There is no turning back after this.
135 mp
->m_qflags
&= ~inactivate_flags
;
138 * Give back all the dquot reference(s) held by inodes.
139 * Here we go thru every single incore inode in this file system, and
140 * do a dqrele on the i_udquot/i_gdquot that it may have.
141 * Essentially, as long as somebody has an inode locked, this guarantees
142 * that quotas will not be turned off. This is handy because in a
143 * transaction once we lock the inode(s) and check for quotaon, we can
144 * depend on the quota inodes (and other things) being valid as long as
145 * we keep the lock(s).
147 xfs_qm_dqrele_all_inodes(mp
, flags
);
150 * Next we make the changes in the quota flag in the mount struct.
151 * This isn't protected by a particular lock directly, because we
152 * don't want to take a mrlock every time we depend on quotas being on.
154 mp
->m_qflags
&= ~flags
;
157 * Go through all the dquots of this file system and purge them,
158 * according to what was turned off.
160 xfs_qm_dqpurge_all(mp
, dqtype
);
163 * Transactions that had started before ACTIVE state bit was cleared
164 * could have logged many dquots, so they'd have higher LSNs than
165 * the first QUOTAOFF log record does. If we happen to crash when
166 * the tail of the log has gone past the QUOTAOFF record, but
167 * before the last dquot modification, those dquots __will__
168 * recover, and that's not good.
170 * So, we have QUOTAOFF start and end logitems; the start
171 * logitem won't get overwritten until the end logitem appears...
173 error
= xfs_qm_log_quotaoff_end(mp
, qoffstart
, flags
);
175 /* We're screwed now. Shutdown is the only option. */
176 xfs_force_shutdown(mp
, SHUTDOWN_CORRUPT_INCORE
);
181 * If all quotas are completely turned off, close shop.
183 if (mp
->m_qflags
== 0) {
184 mutex_unlock(&q
->qi_quotaofflock
);
185 xfs_qm_destroy_quotainfo(mp
);
190 * Release our quotainode references if we don't need them anymore.
192 if ((dqtype
& XFS_QMOPT_UQUOTA
) && q
->qi_uquotaip
) {
193 xfs_irele(q
->qi_uquotaip
);
194 q
->qi_uquotaip
= NULL
;
196 if ((dqtype
& XFS_QMOPT_GQUOTA
) && q
->qi_gquotaip
) {
197 xfs_irele(q
->qi_gquotaip
);
198 q
->qi_gquotaip
= NULL
;
200 if ((dqtype
& XFS_QMOPT_PQUOTA
) && q
->qi_pquotaip
) {
201 xfs_irele(q
->qi_pquotaip
);
202 q
->qi_pquotaip
= NULL
;
206 mutex_unlock(&q
->qi_quotaofflock
);
211 xfs_qm_scall_trunc_qfile(
212 struct xfs_mount
*mp
,
215 struct xfs_inode
*ip
;
216 struct xfs_trans
*tp
;
219 if (ino
== NULLFSINO
)
222 error
= xfs_iget(mp
, NULL
, ino
, 0, 0, &ip
);
226 xfs_ilock(ip
, XFS_IOLOCK_EXCL
);
228 error
= xfs_trans_alloc(mp
, &M_RES(mp
)->tr_itruncate
, 0, 0, 0, &tp
);
230 xfs_iunlock(ip
, XFS_IOLOCK_EXCL
);
234 xfs_ilock(ip
, XFS_ILOCK_EXCL
);
235 xfs_trans_ijoin(tp
, ip
, 0);
238 xfs_trans_log_inode(tp
, ip
, XFS_ILOG_CORE
);
240 error
= xfs_itruncate_extents(&tp
, ip
, XFS_DATA_FORK
, 0);
242 xfs_trans_cancel(tp
);
246 ASSERT(ip
->i_d
.di_nextents
== 0);
248 xfs_trans_ichgtime(tp
, ip
, XFS_ICHGTIME_MOD
| XFS_ICHGTIME_CHG
);
249 error
= xfs_trans_commit(tp
);
252 xfs_iunlock(ip
, XFS_ILOCK_EXCL
| XFS_IOLOCK_EXCL
);
259 xfs_qm_scall_trunc_qfiles(
265 if (!xfs_sb_version_hasquota(&mp
->m_sb
) || flags
== 0 ||
266 (flags
& ~XFS_DQ_ALLTYPES
)) {
267 xfs_debug(mp
, "%s: flags=%x m_qflags=%x",
268 __func__
, flags
, mp
->m_qflags
);
272 if (flags
& XFS_DQ_USER
) {
273 error
= xfs_qm_scall_trunc_qfile(mp
, mp
->m_sb
.sb_uquotino
);
277 if (flags
& XFS_DQ_GROUP
) {
278 error
= xfs_qm_scall_trunc_qfile(mp
, mp
->m_sb
.sb_gquotino
);
282 if (flags
& XFS_DQ_PROJ
)
283 error
= xfs_qm_scall_trunc_qfile(mp
, mp
->m_sb
.sb_pquotino
);
289 * Switch on (a given) quota enforcement for a filesystem. This takes
290 * effect immediately.
291 * (Switching on quota accounting must be done at mount time.)
294 xfs_qm_scall_quotaon(
301 flags
&= (XFS_ALL_QUOTA_ACCT
| XFS_ALL_QUOTA_ENFD
);
303 * Switching on quota accounting must be done at mount time.
305 flags
&= ~(XFS_ALL_QUOTA_ACCT
);
308 xfs_debug(mp
, "%s: zero flags, m_qflags=%x",
309 __func__
, mp
->m_qflags
);
314 * Can't enforce without accounting. We check the superblock
315 * qflags here instead of m_qflags because rootfs can have
316 * quota acct on ondisk without m_qflags' knowing.
318 if (((mp
->m_sb
.sb_qflags
& XFS_UQUOTA_ACCT
) == 0 &&
319 (flags
& XFS_UQUOTA_ENFD
)) ||
320 ((mp
->m_sb
.sb_qflags
& XFS_GQUOTA_ACCT
) == 0 &&
321 (flags
& XFS_GQUOTA_ENFD
)) ||
322 ((mp
->m_sb
.sb_qflags
& XFS_PQUOTA_ACCT
) == 0 &&
323 (flags
& XFS_PQUOTA_ENFD
))) {
325 "%s: Can't enforce without acct, flags=%x sbflags=%x",
326 __func__
, flags
, mp
->m_sb
.sb_qflags
);
330 * If everything's up to-date incore, then don't waste time.
332 if ((mp
->m_qflags
& flags
) == flags
)
336 * Change sb_qflags on disk but not incore mp->qflags
337 * if this is the root filesystem.
339 spin_lock(&mp
->m_sb_lock
);
340 qf
= mp
->m_sb
.sb_qflags
;
341 mp
->m_sb
.sb_qflags
= qf
| flags
;
342 spin_unlock(&mp
->m_sb_lock
);
345 * There's nothing to change if it's the same.
347 if ((qf
& flags
) == flags
)
350 error
= xfs_sync_sb(mp
, false);
354 * If we aren't trying to switch on quota enforcement, we are done.
356 if (((mp
->m_sb
.sb_qflags
& XFS_UQUOTA_ACCT
) !=
357 (mp
->m_qflags
& XFS_UQUOTA_ACCT
)) ||
358 ((mp
->m_sb
.sb_qflags
& XFS_PQUOTA_ACCT
) !=
359 (mp
->m_qflags
& XFS_PQUOTA_ACCT
)) ||
360 ((mp
->m_sb
.sb_qflags
& XFS_GQUOTA_ACCT
) !=
361 (mp
->m_qflags
& XFS_GQUOTA_ACCT
)))
364 if (! XFS_IS_QUOTA_RUNNING(mp
))
368 * Switch on quota enforcement in core.
370 mutex_lock(&mp
->m_quotainfo
->qi_quotaofflock
);
371 mp
->m_qflags
|= (flags
& XFS_ALL_QUOTA_ENFD
);
372 mutex_unlock(&mp
->m_quotainfo
->qi_quotaofflock
);
/* All qc_dqblk fieldmask bits that setqlim knows how to apply. */
#define XFS_QC_MASK \
	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
381 * Adjust quota limits, and start/stop timers accordingly.
384 xfs_qm_scall_setqlim(
385 struct xfs_mount
*mp
,
388 struct qc_dqblk
*newlim
)
390 struct xfs_quotainfo
*q
= mp
->m_quotainfo
;
391 struct xfs_disk_dquot
*ddq
;
392 struct xfs_dquot
*dqp
;
393 struct xfs_trans
*tp
;
394 struct xfs_def_quota
*defq
;
396 xfs_qcnt_t hard
, soft
;
398 if (newlim
->d_fieldmask
& ~XFS_QC_MASK
)
400 if ((newlim
->d_fieldmask
& XFS_QC_MASK
) == 0)
404 * We don't want to race with a quotaoff so take the quotaoff lock.
405 * We don't hold an inode lock, so there's nothing else to stop
406 * a quotaoff from happening.
408 mutex_lock(&q
->qi_quotaofflock
);
411 * Get the dquot (locked) before we start, as we need to do a
412 * transaction to allocate it if it doesn't exist. Once we have the
413 * dquot, unlock it so we can start the next transaction safely. We hold
414 * a reference to the dquot, so it's safe to do this unlock/lock without
415 * it being reclaimed in the mean time.
417 error
= xfs_qm_dqget(mp
, id
, type
, true, &dqp
);
419 ASSERT(error
!= -ENOENT
);
423 defq
= xfs_get_defquota(dqp
, q
);
426 error
= xfs_trans_alloc(mp
, &M_RES(mp
)->tr_qm_setqlim
, 0, 0, 0, &tp
);
431 xfs_trans_dqjoin(tp
, dqp
);
435 * Make sure that hardlimits are >= soft limits before changing.
437 hard
= (newlim
->d_fieldmask
& QC_SPC_HARD
) ?
438 (xfs_qcnt_t
) XFS_B_TO_FSB(mp
, newlim
->d_spc_hardlimit
) :
439 be64_to_cpu(ddq
->d_blk_hardlimit
);
440 soft
= (newlim
->d_fieldmask
& QC_SPC_SOFT
) ?
441 (xfs_qcnt_t
) XFS_B_TO_FSB(mp
, newlim
->d_spc_softlimit
) :
442 be64_to_cpu(ddq
->d_blk_softlimit
);
443 if (hard
== 0 || hard
>= soft
) {
444 ddq
->d_blk_hardlimit
= cpu_to_be64(hard
);
445 ddq
->d_blk_softlimit
= cpu_to_be64(soft
);
446 xfs_dquot_set_prealloc_limits(dqp
);
448 defq
->bhardlimit
= hard
;
449 defq
->bsoftlimit
= soft
;
452 xfs_debug(mp
, "blkhard %Ld < blksoft %Ld", hard
, soft
);
454 hard
= (newlim
->d_fieldmask
& QC_RT_SPC_HARD
) ?
455 (xfs_qcnt_t
) XFS_B_TO_FSB(mp
, newlim
->d_rt_spc_hardlimit
) :
456 be64_to_cpu(ddq
->d_rtb_hardlimit
);
457 soft
= (newlim
->d_fieldmask
& QC_RT_SPC_SOFT
) ?
458 (xfs_qcnt_t
) XFS_B_TO_FSB(mp
, newlim
->d_rt_spc_softlimit
) :
459 be64_to_cpu(ddq
->d_rtb_softlimit
);
460 if (hard
== 0 || hard
>= soft
) {
461 ddq
->d_rtb_hardlimit
= cpu_to_be64(hard
);
462 ddq
->d_rtb_softlimit
= cpu_to_be64(soft
);
464 defq
->rtbhardlimit
= hard
;
465 defq
->rtbsoftlimit
= soft
;
468 xfs_debug(mp
, "rtbhard %Ld < rtbsoft %Ld", hard
, soft
);
471 hard
= (newlim
->d_fieldmask
& QC_INO_HARD
) ?
472 (xfs_qcnt_t
) newlim
->d_ino_hardlimit
:
473 be64_to_cpu(ddq
->d_ino_hardlimit
);
474 soft
= (newlim
->d_fieldmask
& QC_INO_SOFT
) ?
475 (xfs_qcnt_t
) newlim
->d_ino_softlimit
:
476 be64_to_cpu(ddq
->d_ino_softlimit
);
477 if (hard
== 0 || hard
>= soft
) {
478 ddq
->d_ino_hardlimit
= cpu_to_be64(hard
);
479 ddq
->d_ino_softlimit
= cpu_to_be64(soft
);
481 defq
->ihardlimit
= hard
;
482 defq
->isoftlimit
= soft
;
485 xfs_debug(mp
, "ihard %Ld < isoft %Ld", hard
, soft
);
489 * Update warnings counter(s) if requested
491 if (newlim
->d_fieldmask
& QC_SPC_WARNS
)
492 ddq
->d_bwarns
= cpu_to_be16(newlim
->d_spc_warns
);
493 if (newlim
->d_fieldmask
& QC_INO_WARNS
)
494 ddq
->d_iwarns
= cpu_to_be16(newlim
->d_ino_warns
);
495 if (newlim
->d_fieldmask
& QC_RT_SPC_WARNS
)
496 ddq
->d_rtbwarns
= cpu_to_be16(newlim
->d_rt_spc_warns
);
500 * Timelimits for the super user set the relative time
501 * the other users can be over quota for this file system.
502 * If it is zero a default is used. Ditto for the default
503 * soft and hard limit values (already done, above), and
506 if (newlim
->d_fieldmask
& QC_SPC_TIMER
) {
507 q
->qi_btimelimit
= newlim
->d_spc_timer
;
508 ddq
->d_btimer
= cpu_to_be32(newlim
->d_spc_timer
);
510 if (newlim
->d_fieldmask
& QC_INO_TIMER
) {
511 q
->qi_itimelimit
= newlim
->d_ino_timer
;
512 ddq
->d_itimer
= cpu_to_be32(newlim
->d_ino_timer
);
514 if (newlim
->d_fieldmask
& QC_RT_SPC_TIMER
) {
515 q
->qi_rtbtimelimit
= newlim
->d_rt_spc_timer
;
516 ddq
->d_rtbtimer
= cpu_to_be32(newlim
->d_rt_spc_timer
);
518 if (newlim
->d_fieldmask
& QC_SPC_WARNS
)
519 q
->qi_bwarnlimit
= newlim
->d_spc_warns
;
520 if (newlim
->d_fieldmask
& QC_INO_WARNS
)
521 q
->qi_iwarnlimit
= newlim
->d_ino_warns
;
522 if (newlim
->d_fieldmask
& QC_RT_SPC_WARNS
)
523 q
->qi_rtbwarnlimit
= newlim
->d_rt_spc_warns
;
526 * If the user is now over quota, start the timelimit.
527 * The user will not be 'warned'.
528 * Note that we keep the timers ticking, whether enforcement
529 * is on or off. We don't really want to bother with iterating
530 * over all ondisk dquots and turning the timers on/off.
532 xfs_qm_adjust_dqtimers(mp
, ddq
);
534 dqp
->dq_flags
|= XFS_DQ_DIRTY
;
535 xfs_trans_log_dquot(tp
, dqp
);
537 error
= xfs_trans_commit(tp
);
542 mutex_unlock(&q
->qi_quotaofflock
);
547 xfs_qm_log_quotaoff_end(
549 xfs_qoff_logitem_t
*startqoff
,
554 xfs_qoff_logitem_t
*qoffi
;
556 error
= xfs_trans_alloc(mp
, &M_RES(mp
)->tr_qm_equotaoff
, 0, 0, 0, &tp
);
560 qoffi
= xfs_trans_get_qoff_item(tp
, startqoff
,
561 flags
& XFS_ALL_QUOTA_ACCT
);
562 xfs_trans_log_quotaoff_item(tp
, qoffi
);
565 * We have to make sure that the transaction is secure on disk before we
566 * return and actually stop quota accounting. So, make it synchronous.
567 * We don't care about quotoff's performance.
569 xfs_trans_set_sync(tp
);
570 return xfs_trans_commit(tp
);
577 xfs_qoff_logitem_t
**qoffstartp
,
582 xfs_qoff_logitem_t
*qoffi
;
586 error
= xfs_trans_alloc(mp
, &M_RES(mp
)->tr_qm_quotaoff
, 0, 0, 0, &tp
);
590 qoffi
= xfs_trans_get_qoff_item(tp
, NULL
, flags
& XFS_ALL_QUOTA_ACCT
);
591 xfs_trans_log_quotaoff_item(tp
, qoffi
);
593 spin_lock(&mp
->m_sb_lock
);
594 mp
->m_sb
.sb_qflags
= (mp
->m_qflags
& ~(flags
)) & XFS_MOUNT_QUOTA_ALL
;
595 spin_unlock(&mp
->m_sb_lock
);
600 * We have to make sure that the transaction is secure on disk before we
601 * return and actually stop quota accounting. So, make it synchronous.
602 * We don't care about quotoff's performance.
604 xfs_trans_set_sync(tp
);
605 error
= xfs_trans_commit(tp
);
614 /* Fill out the quota context. */
616 xfs_qm_scall_getquota_fill_qc(
617 struct xfs_mount
*mp
,
619 const struct xfs_dquot
*dqp
,
620 struct qc_dqblk
*dst
)
622 memset(dst
, 0, sizeof(*dst
));
623 dst
->d_spc_hardlimit
=
624 XFS_FSB_TO_B(mp
, be64_to_cpu(dqp
->q_core
.d_blk_hardlimit
));
625 dst
->d_spc_softlimit
=
626 XFS_FSB_TO_B(mp
, be64_to_cpu(dqp
->q_core
.d_blk_softlimit
));
627 dst
->d_ino_hardlimit
= be64_to_cpu(dqp
->q_core
.d_ino_hardlimit
);
628 dst
->d_ino_softlimit
= be64_to_cpu(dqp
->q_core
.d_ino_softlimit
);
629 dst
->d_space
= XFS_FSB_TO_B(mp
, dqp
->q_res_bcount
);
630 dst
->d_ino_count
= dqp
->q_res_icount
;
631 dst
->d_spc_timer
= be32_to_cpu(dqp
->q_core
.d_btimer
);
632 dst
->d_ino_timer
= be32_to_cpu(dqp
->q_core
.d_itimer
);
633 dst
->d_ino_warns
= be16_to_cpu(dqp
->q_core
.d_iwarns
);
634 dst
->d_spc_warns
= be16_to_cpu(dqp
->q_core
.d_bwarns
);
635 dst
->d_rt_spc_hardlimit
=
636 XFS_FSB_TO_B(mp
, be64_to_cpu(dqp
->q_core
.d_rtb_hardlimit
));
637 dst
->d_rt_spc_softlimit
=
638 XFS_FSB_TO_B(mp
, be64_to_cpu(dqp
->q_core
.d_rtb_softlimit
));
639 dst
->d_rt_space
= XFS_FSB_TO_B(mp
, dqp
->q_res_rtbcount
);
640 dst
->d_rt_spc_timer
= be32_to_cpu(dqp
->q_core
.d_rtbtimer
);
641 dst
->d_rt_spc_warns
= be16_to_cpu(dqp
->q_core
.d_rtbwarns
);
644 * Internally, we don't reset all the timers when quota enforcement
645 * gets turned off. No need to confuse the user level code,
646 * so return zeroes in that case.
648 if ((!XFS_IS_UQUOTA_ENFORCED(mp
) &&
649 dqp
->q_core
.d_flags
== XFS_DQ_USER
) ||
650 (!XFS_IS_GQUOTA_ENFORCED(mp
) &&
651 dqp
->q_core
.d_flags
== XFS_DQ_GROUP
) ||
652 (!XFS_IS_PQUOTA_ENFORCED(mp
) &&
653 dqp
->q_core
.d_flags
== XFS_DQ_PROJ
)) {
654 dst
->d_spc_timer
= 0;
655 dst
->d_ino_timer
= 0;
656 dst
->d_rt_spc_timer
= 0;
660 if (((XFS_IS_UQUOTA_ENFORCED(mp
) && type
== XFS_DQ_USER
) ||
661 (XFS_IS_GQUOTA_ENFORCED(mp
) && type
== XFS_DQ_GROUP
) ||
662 (XFS_IS_PQUOTA_ENFORCED(mp
) && type
== XFS_DQ_PROJ
)) &&
663 dqp
->q_core
.d_id
!= 0) {
664 if ((dst
->d_space
> dst
->d_spc_softlimit
) &&
665 (dst
->d_spc_softlimit
> 0)) {
666 ASSERT(dst
->d_spc_timer
!= 0);
668 if ((dst
->d_ino_count
> dst
->d_ino_softlimit
) &&
669 (dst
->d_ino_softlimit
> 0)) {
670 ASSERT(dst
->d_ino_timer
!= 0);
676 /* Return the quota information for the dquot matching id. */
678 xfs_qm_scall_getquota(
679 struct xfs_mount
*mp
,
682 struct qc_dqblk
*dst
)
684 struct xfs_dquot
*dqp
;
688 * Try to get the dquot. We don't want it allocated on disk, so don't
689 * set doalloc. If it doesn't exist, we'll get ENOENT back.
691 error
= xfs_qm_dqget(mp
, id
, type
, false, &dqp
);
696 * If everything's NULL, this dquot doesn't quite exist as far as
697 * our utility programs are concerned.
699 if (XFS_IS_DQUOT_UNINITIALIZED(dqp
)) {
704 xfs_qm_scall_getquota_fill_qc(mp
, type
, dqp
, dst
);
712 * Return the quota information for the first initialized dquot whose id
713 * is at least as high as id.
716 xfs_qm_scall_getquota_next(
717 struct xfs_mount
*mp
,
720 struct qc_dqblk
*dst
)
722 struct xfs_dquot
*dqp
;
725 error
= xfs_qm_dqget_next(mp
, *id
, type
, &dqp
);
729 /* Fill in the ID we actually read from disk */
730 *id
= be32_to_cpu(dqp
->q_core
.d_id
);
732 xfs_qm_scall_getquota_fill_qc(mp
, type
, dqp
, dst
);
740 struct xfs_inode
*ip
,
744 /* skip quota inodes */
745 if (ip
== ip
->i_mount
->m_quotainfo
->qi_uquotaip
||
746 ip
== ip
->i_mount
->m_quotainfo
->qi_gquotaip
||
747 ip
== ip
->i_mount
->m_quotainfo
->qi_pquotaip
) {
748 ASSERT(ip
->i_udquot
== NULL
);
749 ASSERT(ip
->i_gdquot
== NULL
);
750 ASSERT(ip
->i_pdquot
== NULL
);
754 xfs_ilock(ip
, XFS_ILOCK_EXCL
);
755 if ((flags
& XFS_UQUOTA_ACCT
) && ip
->i_udquot
) {
756 xfs_qm_dqrele(ip
->i_udquot
);
759 if ((flags
& XFS_GQUOTA_ACCT
) && ip
->i_gdquot
) {
760 xfs_qm_dqrele(ip
->i_gdquot
);
763 if ((flags
& XFS_PQUOTA_ACCT
) && ip
->i_pdquot
) {
764 xfs_qm_dqrele(ip
->i_pdquot
);
767 xfs_iunlock(ip
, XFS_ILOCK_EXCL
);
773 * Go thru all the inodes in the file system, releasing their dquots.
775 * Note that the mount structure gets modified to indicate that quotas are off
776 * AFTER this, in the case of quotaoff.
779 xfs_qm_dqrele_all_inodes(
780 struct xfs_mount
*mp
,
783 ASSERT(mp
->m_quotainfo
);
784 xfs_inode_ag_iterator_flags(mp
, xfs_dqrele_inode
, flags
, NULL
,
785 XFS_AGITER_INEW_WAIT
);