/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/capability.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
					uint);

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
	xfs_mount_t		*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	uint			dqtype;
	int			error;
	uint			inactivate_flags;
	xfs_qoff_logitem_t	*qoffstart;

	/*
	 * No file system can have quotas enabled on disk but not in core.
	 * Note that quota utilities (like quotaoff) _expect_
	 * errno == -EEXIST here.
	 */
	if ((mp->m_qflags & flags) == 0)
		return -EEXIST;
	error = 0;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

	/*
	 * We don't want to deal with two quotaoffs messing up each other,
	 * so we're going to serialize it. quotaoff isn't exactly a performance
	 * critical thing.
	 * If quotaoff, then we must be dealing with the root filesystem.
	 */
	ASSERT(q);
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * If we're just turning off quota enforcement, change mp and go.
	 */
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&q->qi_quotaofflock);

		/* XXX what to do if error ? Revert back to old vals incore ? */
		return xfs_sync_sb(mp, false);
	}

	dqtype = 0;
	inactivate_flags = 0;
	/*
	 * If accounting is off, we must turn enforcement off, clear the
	 * quota 'CHKD' certificate to make it known that we have to
	 * do a quotacheck the next time this quota is turned on.
	 */
	if (flags & XFS_UQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_UQUOTA;
		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
		inactivate_flags |= XFS_UQUOTA_ACTIVE;
	}
	if (flags & XFS_GQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_GQUOTA;
		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
		inactivate_flags |= XFS_GQUOTA_ACTIVE;
	}
	if (flags & XFS_PQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_PQUOTA;
		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
		inactivate_flags |= XFS_PQUOTA_ACTIVE;
	}
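
	/*
	 * For example, quotaoff on user quotas alone (flags == XFS_UQUOTA_ACCT
	 * on entry) leaves this block with
	 *
	 *	flags            == XFS_UQUOTA_ACCT | XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD
	 *	dqtype           == XFS_QMOPT_UQUOTA
	 *	inactivate_flags == XFS_UQUOTA_ACTIVE
	 *
	 * so the superblock drops the user "checked" and "enforced" state
	 * along with accounting, and only user dquots are deactivated and
	 * purged below.
	 */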

	/*
	 * Nothing to do?  Don't complain. This happens when we're just
	 * turning off quota enforcement.
	 */
	if ((mp->m_qflags & flags) == 0)
		goto out_unlock;

	/*
	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
	 * and synchronously. If we fail to write, we should abort the
	 * operation as it cannot be recovered safely if we crash.
	 */
	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
	if (error)
		goto out_unlock;

	/*
	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
	 * to take care of the race between dqget and quotaoff. We don't take
	 * any special locks to reset these bits. All processes need to check
	 * these bits *after* taking inode lock(s) to see if the particular
	 * quota type is in the process of being turned off. If *ACTIVE, it is
	 * guaranteed that all dquot structures and all quotainode ptrs will all
	 * stay valid as long as that inode is kept locked.
	 *
	 * There is no turning back after this.
	 */
	mp->m_qflags &= ~inactivate_flags;

	/*
	 * Give back all the dquot reference(s) held by inodes.
	 * Here we go thru every single incore inode in this file system, and
	 * do a dqrele on the i_udquot/i_gdquot that it may have.
	 * Essentially, as long as somebody has an inode locked, this guarantees
	 * that quotas will not be turned off. This is handy because in a
	 * transaction once we lock the inode(s) and check for quotaon, we can
	 * depend on the quota inodes (and other things) being valid as long as
	 * we keep the lock(s).
	 */
	xfs_qm_dqrele_all_inodes(mp, flags);

	/*
	 * Next we make the changes in the quota flag in the mount struct.
	 * This isn't protected by a particular lock directly, because we
	 * don't want to take a mrlock every time we depend on quotas being on.
	 */
	mp->m_qflags &= ~flags;

	/*
	 * Go through all the dquots of this file system and purge them,
	 * according to what was turned off.
	 */
	xfs_qm_dqpurge_all(mp, dqtype);

	/*
	 * Transactions that had started before ACTIVE state bit was cleared
	 * could have logged many dquots, so they'd have higher LSNs than
	 * the first QUOTAOFF log record does. If we happen to crash when
	 * the tail of the log has gone past the QUOTAOFF record, but
	 * before the last dquot modification, those dquots __will__
	 * recover, and that's not good.
	 *
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		goto out_unlock;
	}
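
	/*
	 * In other words, the on-disk ordering this relies on is
	 *
	 *	QUOTAOFF(start) ... dquot modifications ... QUOTAOFF(end)
	 *
	 * The start item is not released until the end item is logged, so
	 * the log tail cannot move past it; recovery therefore never replays
	 * dquot changes without also seeing that quotas were being turned off.
	 */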

	/*
	 * If all quotas are completely turned off, close shop.
	 */
	if (mp->m_qflags == 0) {
		mutex_unlock(&q->qi_quotaofflock);
		xfs_qm_destroy_quotainfo(mp);
		return 0;
	}

	/*
	 * Release our quotainode references if we don't need them anymore.
	 */
	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
		IRELE(q->qi_uquotaip);
		q->qi_uquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
		IRELE(q->qi_gquotaip);
		q->qi_gquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
		IRELE(q->qi_pquotaip);
		q->qi_pquotaip = NULL;
	}

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	IRELE(ip);
	return error;
}
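
/*
 * A note on the ordering in xfs_qm_scall_trunc_qfile() above: the IOLOCK is
 * taken before the transaction is reserved, while the ILOCK is only taken
 * once the reservation has succeeded, since XFS generally must not hold the
 * ILOCK across a log reservation that may block waiting for log space.
 */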

int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = -EINVAL;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
	    (flags & ~XFS_DQ_ALLTYPES)) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
			__func__, flags, mp->m_qflags);
		return -EINVAL;
	}

	if (flags & XFS_DQ_USER) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_GROUP) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_PROJ)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

	return error;
}

/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
			__func__, mp->m_qflags);
		return -EINVAL;
	}

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota acct on ondisk without m_qflags' knowing.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	     (flags & XFS_UQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	     (flags & XFS_GQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	     (flags & XFS_PQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x",
			__func__, flags, mp->m_sb.sb_qflags);
		return -EINVAL;
	}

	/*
	 * If everything's up-to-date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return -EEXIST;

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags)
		return -EEXIST;

	error = xfs_sync_sb(mp, false);
	if (error)
		return error;

	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
		return 0;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ESRCH;

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return 0;
}

#define XFS_QC_MASK \
	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
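
/*
 * Callers describe which fields of the generic qc_dqblk they want applied
 * via d_fieldmask; for instance, a caller changing only the block limits
 * might pass
 *
 *	newlim->d_fieldmask = QC_SPC_SOFT | QC_SPC_HARD;
 *
 * Anything outside XFS_QC_MASK is rejected by xfs_qm_scall_setqlim() below.
 */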

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_QC_MASK)
		return -EINVAL;
	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We hold
	 * a reference to the dquot, so it's safe to do this unlock/lock without
	 * it being reclaimed in the mean time.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_unlock;
	}
	xfs_dqunlock(dqp);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_rele;
	}

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
	}

	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
	}

	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & QC_SPC_WARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
	if (newlim->d_fieldmask & QC_INO_WARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used. Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & QC_SPC_TIMER) {
			q->qi_btimelimit = newlim->d_spc_timer;
			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
		}
		if (newlim->d_fieldmask & QC_INO_TIMER) {
			q->qi_itimelimit = newlim->d_ino_timer;
			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
		}
		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
		}
		if (newlim->d_fieldmask & QC_SPC_WARNS)
			q->qi_bwarnlimit = newlim->d_spc_warns;
		if (newlim->d_fieldmask & QC_INO_WARNS)
			q->qi_iwarnlimit = newlim->d_ino_warns;
		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
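
/*
 * Minimal usage sketch for xfs_qm_scall_setqlim() (a hypothetical caller,
 * shown only for illustration): set a 1GiB soft / 2GiB hard space limit for
 * user id 1000. Space limits are passed in bytes and converted to
 * filesystem blocks with XFS_B_TO_FSB() above.
 *
 *	struct qc_dqblk lim = {
 *		.d_fieldmask	 = QC_SPC_SOFT | QC_SPC_HARD,
 *		.d_spc_softlimit = 1ULL << 30,
 *		.d_spc_hardlimit = 2ULL << 30,
 *	};
 *	error = xfs_qm_scall_setqlim(mp, 1000, XFS_DQ_USER, &lim);
 */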

STATIC int
xfs_qm_log_quotaoff_end(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

STATIC int
xfs_qm_log_quotaoff(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	**qoffstartp,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	*qoffstartp = NULL;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out;
	}

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_log_sb(tp);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	*qoffstartp = qoffi;
out:
	return error;
}

int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so
	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
	 * exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = -ENOENT;
		goto out_put;
	}

	memset(dst, 0, sizeof(*dst));
	dst->d_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
	dst->d_ino_count = dqp->q_res_icount;
	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rt_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rt_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
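
	/*
	 * Unit convention for the fields filled in above: the d_*spc* space
	 * fields handed back to the VFS quota code are in bytes (converted
	 * from filesystem blocks via XFS_FSB_TO_B()), while the inode limits,
	 * counts, timers and warning counts are used as-is from the dquot.
	 */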

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
		dst->d_spc_timer = 0;
		dst->d_ino_timer = 0;
		dst->d_rt_spc_timer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
	    id != 0) {
		if ((dst->d_space > dst->d_spc_softlimit) &&
		    (dst->d_spc_softlimit > 0)) {
			ASSERT(dst->d_spc_timer != 0);
		}
		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_ino_timer != 0);
		}
	}
#endif
out_put:
	xfs_qm_dqput(dqp);
	return error;
}

STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_pdquot == NULL);
		return 0;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return 0;
}

/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount	*mp,
	uint			flags)
{
	ASSERT(mp->m_quotainfo);
	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
}