/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
struct xfs_qm	*xfs_Gqm;

kmem_zone_t	*qm_dqzone;
kmem_zone_t	*qm_dqtrxzone;

static cred_t	xfs_zerocr;
STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);

STATIC void	xfs_qm_freelist_init(xfs_frlist_t *);
STATIC void	xfs_qm_freelist_destroy(xfs_frlist_t *);
STATIC int	xfs_qm_mplist_nowait(xfs_mount_t *);
STATIC int	xfs_qm_dqhashlock_nowait(xfs_dquot_t *);

STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int	xfs_qm_shake(int, gfp_t);
static struct shrinker xfs_qm_shaker = {
	.shrink = xfs_qm_shake,
	.seeks = DEFAULT_SEEKS,
};

#ifdef DEBUG
extern mutex_t	qcheck_lock;
#endif
#ifdef DEBUG
#define XQM_LIST_PRINT(l, NXT, title) \
{ \
	xfs_dquot_t	*dqp; int i = 0; \
	cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
	for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
		cmn_err(CE_DEBUG, "   %d. \"%d (%s)\"   " \
				  "bcnt = %d, icnt = %d, refs = %d", \
			++i, (int) be32_to_cpu(dqp->q_core.d_id), \
			DQFLAGTO_TYPESTR(dqp), \
			(int) be64_to_cpu(dqp->q_core.d_bcount), \
			(int) be64_to_cpu(dqp->q_core.d_icount), \
			(int) dqp->q_nrefs); } \
}
#else
#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
#endif
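
/*
 * Illustrative use of the debug macro above (a sketch, not part of the
 * original code): dump the per-mount dquot list under its lock.  MPL_NEXT
 * is the link field name inside each dquot, as used by quotacheck below.
 *
 *	xfs_qm_mplist_lock(mp);
 *	XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "mplist");
 *	xfs_qm_mplist_unlock(mp);
 */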
/*
 * Initialize the XQM structure.
 * Note that there is not one quota manager per file system.
 */
STATIC struct xfs_qm *
xfs_Gqm_init(void)
{
	xfs_dqhash_t	*udqhash, *gdqhash;
	xfs_qm_t	*xqm;
	size_t		hsize;
	uint		i;
	/*
	 * Initialize the dquot hash tables.
	 */
	udqhash = kmem_zalloc_greedy(&hsize,
				     XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
				     XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
				     KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
	hsize /= sizeof(xfs_dqhash_t);
	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
	xqm->qm_dqhashmask = hsize - 1;
	xqm->qm_usr_dqhtable = udqhash;
	xqm->qm_grp_dqhtable = gdqhash;
	ASSERT(xqm->qm_usr_dqhtable != NULL);
	ASSERT(xqm->qm_grp_dqhtable != NULL);

	for (i = 0; i < hsize; i++) {
		xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
		xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
	}
	/*
	 * Freelist of all dquots of all file systems
	 */
	xfs_qm_freelist_init(&(xqm->qm_dqfreelist));

	/*
	 * dquot zone. we register our own low-memory callback.
	 */
	if (!qm_dqzone) {
		xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
						"xfs_dquots");
		qm_dqzone = xqm->qm_dqzone;
	} else
		xqm->qm_dqzone = qm_dqzone;

	register_shrinker(&xfs_qm_shaker);

	/*
	 * The t_dqinfo portion of transactions.
	 */
	if (!qm_dqtrxzone) {
		xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
						   "xfs_dqtrx");
		qm_dqtrxzone = xqm->qm_dqtrxzone;
	} else
		xqm->qm_dqtrxzone = qm_dqtrxzone;

	atomic_set(&xqm->qm_totaldquots, 0);
	xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
	xqm->qm_nrefs = 0;
#ifdef DEBUG
	mutex_init(&qcheck_lock);
#endif
	return xqm;
}
/*
 * Destroy the global quota manager when its reference count goes to zero.
 */
STATIC void
xfs_qm_destroy(
	struct xfs_qm	*xqm)
{
	int		hsize, i;

	ASSERT(xqm != NULL);
	ASSERT(xqm->qm_nrefs == 0);
	unregister_shrinker(&xfs_qm_shaker);
	hsize = xqm->qm_dqhashmask + 1;
	for (i = 0; i < hsize; i++) {
		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
		xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
	}
	kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t));
	kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t));
	xqm->qm_usr_dqhtable = NULL;
	xqm->qm_grp_dqhtable = NULL;
	xqm->qm_dqhashmask = 0;
	xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
#ifdef DEBUG
	mutex_destroy(&qcheck_lock);
#endif
	kmem_free(xqm, sizeof(xfs_qm_t));
}
/*
 * Called at mount time to let XQM know that another file system is
 * starting quotas. This isn't crucial information as the individual mount
 * structures are pretty independent, but it helps the XQM keep a
 * global view of what's going on.
 */
/* ARGSUSED */
int
xfs_qm_hold_quotafs_ref(
	struct xfs_mount *mp)
{
	/*
	 * Need to lock the xfs_Gqm structure for things like this. For example,
	 * the structure could disappear between the entry to this routine and
	 * a HOLD operation if not locked.
	 */
	XFS_QM_LOCK(xfs_Gqm);

	if (xfs_Gqm == NULL)
		xfs_Gqm = xfs_Gqm_init();
	/*
	 * We can keep a list of all filesystems with quotas mounted for
	 * debugging and statistical purposes, but ...
	 * Just take a reference and get out.
	 */
	XFS_QM_HOLD(xfs_Gqm);
	XFS_QM_UNLOCK(xfs_Gqm);

	return 0;
}
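
/*
 * Illustrative pairing (a sketch, not original code): each mount that turns
 * quotas on takes one global reference and must drop it again, e.g.
 *
 *	if ((error = xfs_qm_hold_quotafs_ref(mp)))
 *		return error;
 *	...use the quota manager...
 *	xfs_qm_rele_quotafs_ref(mp);
 *
 * The last release below tears down the global xfs_Gqm structure.
 */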
/*
 * Release the reference that a filesystem took at mount time,
 * so that we know when we need to destroy the entire quota manager.
 */
/* ARGSUSED */
STATIC void
xfs_qm_rele_quotafs_ref(
	struct xfs_mount *mp)
{
	xfs_dquot_t	*dqp, *nextdqp;

	ASSERT(xfs_Gqm);
	ASSERT(xfs_Gqm->qm_nrefs > 0);

	/*
	 * Go thru the freelist and destroy all inactive dquots.
	 */
	xfs_qm_freelist_lock(xfs_Gqm);

	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
	     dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
		xfs_dqlock(dqp);
		nextdqp = dqp->dq_flnext;
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(dqp->HL_PREVP == NULL);
			ASSERT(dqp->MPL_PREVP == NULL);
			XQM_FREELIST_REMOVE(dqp);
			xfs_dqunlock(dqp);
			xfs_qm_dqdestroy(dqp);
		} else {
			xfs_dqunlock(dqp);
		}
		dqp = nextdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);

	/*
	 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
	 * get restarted.
	 */
	XFS_QM_LOCK(xfs_Gqm);
	XFS_QM_RELE(xfs_Gqm);
	if (xfs_Gqm->qm_nrefs == 0) {
		xfs_qm_destroy(xfs_Gqm);
		xfs_Gqm = NULL;
	}
	XFS_QM_UNLOCK(xfs_Gqm);
}
/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount_quotadestroy(
	xfs_mount_t	*mp)
{
	if (mp->m_quotainfo)
		xfs_qm_destroy_quotainfo(mp);
}
/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 */
int
xfs_qm_mount_quotas(
	xfs_mount_t	*mp,
	int		mfsi_flags)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		cmn_err(CE_NOTE,
			"Cannot turn on quotas for realtime filesystem %s",
			mp->m_fsname);
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	if ((error = xfs_qm_init_quotainfo(mp))) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp) &&
	    !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
		if ((error = xfs_qm_quotacheck(mp))) {
			/* Quotacheck has failed and quotas have
			 * been disabled.
			 */
			return XFS_ERROR(error);
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp)) {
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	}
	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) {
		mp->m_qflags &= ~XFS_OQUOTA_CHKD;
	}

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_fs_cmn_err(CE_ALERT, mp,
				"XFS mount_quotas: Superblock update failed!");
		}
	}

	if (error) {
		xfs_fs_cmn_err(CE_WARN, mp,
			"Failed to initialize disk quotas.");
	}
	return XFS_ERROR(error);
}
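
/*
 * The mount-time flow above, summarized (a sketch of the logic, assuming
 * the caller is xfs_mountfs with a consistent superblock):
 *
 *	xfs_qm_init_quotainfo(mp);                   quotainfo + quota inodes
 *	if (XFS_QM_NEED_QUOTACHECK(mp))
 *		xfs_qm_quotacheck(mp);               rebuild usage counters
 *	xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);  persist the final qflags
 */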
/*
 * Called from the vfsops layer.
 */
int
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uqp, *gqp;
	int		error = 0;

	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Flush out the quota inodes.
	 */
	uqp = gqp = NULL;
	if (mp->m_quotainfo) {
		if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) {
			xfs_ilock(uqp, XFS_ILOCK_EXCL);
			xfs_iflock(uqp);
			error = xfs_iflush(uqp, XFS_IFLUSH_SYNC);
			xfs_iunlock(uqp, XFS_ILOCK_EXCL);
			if (unlikely(error == EFSCORRUPTED)) {
				XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)",
						 XFS_ERRLEVEL_LOW, mp);
				goto out;
			}
		}
		if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) {
			xfs_ilock(gqp, XFS_ILOCK_EXCL);
			xfs_iflock(gqp);
			error = xfs_iflush(gqp, XFS_IFLUSH_SYNC);
			xfs_iunlock(gqp, XFS_ILOCK_EXCL);
			if (unlikely(error == EFSCORRUPTED)) {
				XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)",
						 XFS_ERRLEVEL_LOW, mp);
				goto out;
			}
		}
	}
	if (uqp) {
		XFS_PURGE_INODE(uqp);
		mp->m_quotainfo->qi_uquotaip = NULL;
	}
	if (gqp) {
		XFS_PURGE_INODE(gqp);
		mp->m_quotainfo->qi_gquotaip = NULL;
	}
 out:
	return XFS_ERROR(error);
}
/*
 * Flush all dquots of the given file system to disk. The dquots are
 * _not_ purged from memory here, just their data written to disk.
 */
STATIC int
xfs_qm_dqflush_all(
	xfs_mount_t	*mp,
	int		flags)
{
	int		recl;
	xfs_dquot_t	*dqp;
	int		error;

	if (mp->m_quotainfo == NULL)
		return 0;
again:
	xfs_qm_mplist_lock(mp);
	FOREACH_DQUOT_IN_MP(dqp, mp) {
		xfs_dqlock(dqp);
		if (! XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}
		xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
		/* XXX a sentinel would be better */
		recl = XFS_QI_MPLRECLAIMS(mp);
		if (! xfs_qm_dqflock_nowait(dqp)) {
			/*
			 * If we can't grab the flush lock then check
			 * to see if the dquot has been flushed delayed
			 * write. If so, grab its buffer and send it
			 * out immediately. We'll be able to acquire
			 * the flush lock when the I/O completes.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write.
		 */
		xfs_qm_mplist_unlock(mp);
		error = xfs_qm_dqflush(dqp, flags);
		xfs_dqunlock(dqp);
		if (error)
			return error;

		xfs_qm_mplist_lock(mp);
		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
			xfs_qm_mplist_unlock(mp);
			/* XXX restart limit */
			goto again;
		}
	}

	xfs_qm_mplist_unlock(mp);
	return 0;
}
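
/*
 * The "reclaims counter" restart idiom above (also used by xfs_qm_sync and
 * the purge code below): sample XFS_QI_MPLRECLAIMS(mp) before dropping the
 * mplist lock; if it changed once the lock is reacquired, a dquot was
 * reclaimed and the list links can no longer be trusted, so iteration
 * restarts from the head.  A sketch of the pattern:
 *
 *	recl = XFS_QI_MPLRECLAIMS(mp);
 *	xfs_qm_mplist_unlock(mp);
 *	...blocking work...
 *	xfs_qm_mplist_lock(mp);
 *	if (recl != XFS_QI_MPLRECLAIMS(mp))
 *		goto again;
 */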
/*
 * Release the group dquot pointers the user dquots may be
 * carrying around as a hint. mplist is locked on entry and exit.
 */
STATIC void
xfs_qm_detach_gdquots(
	xfs_mount_t	*mp)
{
	xfs_dquot_t	*dqp, *gdqp;
	int		nrecl;

 again:
	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	dqp = XFS_QI_MPLNEXT(mp);
	while (dqp) {
		xfs_dqlock(dqp);
		if ((gdqp = dqp->q_gdquot)) {
			xfs_dqlock(gdqp);
			dqp->q_gdquot = NULL;
		}
		xfs_dqunlock(dqp);

		if (gdqp) {
			/*
			 * Can't hold the mplist lock across a dqput.
			 * XXXmust convert to marker based iterations here.
			 */
			nrecl = XFS_QI_MPLRECLAIMS(mp);
			xfs_qm_mplist_unlock(mp);
			xfs_qm_dqput(gdqp);

			xfs_qm_mplist_lock(mp);
			if (nrecl != XFS_QI_MPLRECLAIMS(mp))
				goto again;
		}
		dqp = dqp->MPL_NEXT;
	}
}
/*
 * Go through all the incore dquots of this file system and take them
 * off the mplist and hashlist, if the dquot type matches the dqtype
 * parameter. This is used when turning off quota accounting for
 * users and/or groups, as well as when the filesystem is unmounting.
 */
STATIC int
xfs_qm_dqpurge_int(
	xfs_mount_t	*mp,
	uint		flags)	/* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
{
	xfs_dquot_t	*dqp;
	uint		dqtype;
	int		nrecl;
	xfs_dquot_t	*nextdqp;
	int		nmisses;

	if (mp->m_quotainfo == NULL)
		return 0;

	dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
	dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
	dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;

	xfs_qm_mplist_lock(mp);

	/*
	 * In the first pass through all incore dquots of this filesystem,
	 * we release the group dquot pointers the user dquots may be
	 * carrying around as a hint. We need to do this irrespective of
	 * what's being turned off.
	 */
	xfs_qm_detach_gdquots(mp);

 again:
	nmisses = 0;
	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	/*
	 * Try to get rid of all of the unwanted dquots. The idea is to
	 * get them off mplist and hashlist, but leave them on freelist.
	 */
	dqp = XFS_QI_MPLNEXT(mp);
	while (dqp) {
		/*
		 * It's OK to look at the type without taking dqlock here.
		 * We're holding the mplist lock here, and that's needed for
		 * a dqreclaim.
		 */
		if ((dqp->dq_flags & dqtype) == 0) {
			dqp = dqp->MPL_NEXT;
			continue;
		}

		if (! xfs_qm_dqhashlock_nowait(dqp)) {
			nrecl = XFS_QI_MPLRECLAIMS(mp);
			xfs_qm_mplist_unlock(mp);
			XFS_DQ_HASH_LOCK(dqp->q_hash);
			xfs_qm_mplist_lock(mp);

			/*
			 * XXXTheoretically, we can get into a very long
			 * ping pong game here.
			 * No one can be adding dquots to the mplist at
			 * this point, but somebody might be taking things off.
			 */
			if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
				XFS_DQ_HASH_UNLOCK(dqp->q_hash);
				goto again;
			}
		}

		/*
		 * Take the dquot off the mplist and hashlist. It may remain on
		 * freelist in INACTIVE state.
		 */
		nextdqp = dqp->MPL_NEXT;
		nmisses += xfs_qm_dqpurge(dqp, flags);
		dqp = nextdqp;
	}
	xfs_qm_mplist_unlock(mp);
	return nmisses;
}
void
xfs_qm_dqpurge_all(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		ndquots;

	/*
	 * Purge the dquot cache.
	 * None of the dquots should really be busy at this point.
	 */
	if (mp->m_quotainfo) {
		while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
			delay(ndquots * 10);
		}
	}
}
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	uint		dolock,
	xfs_dquot_t	*udqhint,	/* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	error = 0;
	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	if ((dqp = *IO_idqpp)) {
		if (dolock)
			xfs_dqlock(dqp);
		xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
		goto done;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
	if (udqhint && !dolock)
		xfs_dqlock(udqhint);

	/*
	 * No need to take dqlock to look at the id.
	 * The ID can't change until it gets reclaimed, and it won't
	 * be reclaimed as long as we have a ref from inode and we hold
	 * the ilock.
	 */
	if (udqhint &&
	    (dqp = udqhint->q_gdquot) &&
	    (be32_to_cpu(dqp->q_core.d_id) == id)) {
		ASSERT(XFS_DQ_IS_LOCKED(udqhint));
		xfs_dqlock(dqp);
		XFS_DQHOLD(dqp);
		ASSERT(*IO_idqpp == NULL);
		*IO_idqpp = dqp;
		if (!dolock) {
			xfs_dqunlock(dqp);
			xfs_dqunlock(udqhint);
		}
		goto done;
	}
	/*
	 * We can't hold a dquot lock when we call the dqget code.
	 * We'll deadlock in no time, because of (not conforming to)
	 * lock ordering - the inodelock comes before any dquot lock,
	 * and we may drop and reacquire the ilock in xfs_qm_dqget().
	 */
	if (udqhint)
		xfs_dqunlock(udqhint);
	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
				  doalloc|XFS_QMOPT_DOWARN, &dqp))) {
		if (udqhint && dolock)
			xfs_dqlock(udqhint);
		goto done;
	}

	xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	ASSERT(dqp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (! dolock) {
		xfs_dqunlock(dqp);
		goto done;
	}
	if (! udqhint)
		goto done;

	ASSERT(udqhint);
	ASSERT(dolock);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (! xfs_qm_dqlock_nowait(udqhint)) {
		xfs_dqunlock(dqp);
		xfs_dqlock(udqhint);
		xfs_dqlock(dqp);
	}
 done:
#ifdef QUOTADEBUG
	if (udqhint && dolock)
		ASSERT(XFS_DQ_IS_LOCKED(udqhint));
	if (!error && dolock)
		ASSERT(XFS_DQ_IS_LOCKED(dqp));
#endif
	return error;
}
/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups. The idea sounds simple, but the
 * execution isn't, because the udquot might have a group dquot attached
 * already and getting rid of that gets us into lock ordering constraints.
 * The process is complicated more by the fact that the dquots may or may not
 * be locked on entry.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq,
	uint		locked)
{
	xfs_dquot_t	*tmp;

#ifdef QUOTADEBUG
	if (locked) {
		ASSERT(XFS_DQ_IS_LOCKED(udq));
		ASSERT(XFS_DQ_IS_LOCKED(gdq));
	}
#endif
	if (! locked)
		xfs_dqlock(udq);

	if ((tmp = udq->q_gdquot)) {
		if (tmp == gdq) {
			if (! locked)
				xfs_dqunlock(udq);
			return;
		}

		udq->q_gdquot = NULL;
		/*
		 * We can't keep any dqlocks when calling dqrele,
		 * because the freelist lock comes before dqlocks.
		 */
		xfs_dqunlock(udq);
		if (locked)
			xfs_dqunlock(gdq);
		/*
		 * we took a hard reference once upon a time in dqget,
		 * so give it back when the udquot no longer points at it
		 * dqput() does the unlocking of the dquot.
		 */
		xfs_qm_dqrele(tmp);

		xfs_dqlock(udq);
		xfs_dqlock(gdq);

	} else {
		ASSERT(XFS_DQ_IS_LOCKED(udq));
		if (! locked)
			xfs_dqlock(gdq);
	}

	ASSERT(XFS_DQ_IS_LOCKED(udq));
	ASSERT(XFS_DQ_IS_LOCKED(gdq));
	/*
	 * Somebody could have attached a gdquot here,
	 * when we dropped the uqlock. If so, just do nothing.
	 */
	if (udq->q_gdquot == NULL) {
		XFS_DQHOLD(gdq);
		udq->q_gdquot = gdq;
	} else {
		if (! locked)
			xfs_dqunlock(gdq);
	}
	if (! locked)
		xfs_dqunlock(udq);
}
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
 * much made this code a complete mess, but it has been pretty useful.
 * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if ((! XFS_IS_QUOTA_ON(mp)) ||
	    (! XFS_NOT_DQATTACHED(mp, ip)) ||
	    (ip->i_ino == mp->m_sb.sb_uquotino) ||
	    (ip->i_ino == mp->m_sb.sb_gquotino))
		return 0;

	ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
	       XFS_ISLOCKED_INODE_EXCL(ip));

	if (! (flags & XFS_QMOPT_ILOCKED))
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
					    flags & XFS_QMOPT_DQALLOC,
					    flags & XFS_QMOPT_DQLOCK,
					    NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}
	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						flags & XFS_QMOPT_DQLOCK,
						ip->i_udquot, &ip->i_gdquot) :
			xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						flags & XFS_QMOPT_DQLOCK,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We may or may not have the i_udquot locked at this point,
		 * but this check is OK since we don't depend on the i_gdquot to
		 * be accurate 100% all the time. It is just a hint, and this
		 * will succeed in general.
		 */
		if (ip->i_udquot->q_gdquot == ip->i_gdquot)
			goto done;
		/*
		 * Attach i_gdquot to the gdquot hint inside the i_udquot.
		 */
		xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
					  flags & XFS_QMOPT_DQLOCK);
	}

 done:

#ifdef QUOTADEBUG
	if (! error) {
		if (ip->i_udquot) {
			if (flags & XFS_QMOPT_DQLOCK)
				ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
		}
		if (ip->i_gdquot) {
			if (flags & XFS_QMOPT_DQLOCK)
				ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
		}
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_OQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
#endif

	if (! (flags & XFS_QMOPT_ILOCKED))
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	else
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	return error;
}
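
/*
 * Typical caller pattern (a sketch, as seen in the vnodeops helpers further
 * down in this file): attach on demand before charging a transaction.
 *
 *	if (XFS_NOT_DQATTACHED(mp, ip)) {
 *		if ((error = xfs_qm_dqattach(ip, 0)))
 *			return error;
 *	}
 */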
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this's called by
 * ddelete().
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot))
		return;

	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
	if (ip->i_udquot) {
		xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
}
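
/*
 * Lifecycle pairing (an illustrative sketch, not original code):
 * xfs_qm_dqattach() is called lazily whenever accounting needs the inode's
 * dquots, and xfs_qm_dqdetach() drops those references at inode teardown
 * (see the m_rootip/m_rbmip/m_rsumip detaches in xfs_qm_unmount_quotas).
 *
 *	xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED);
 *	...modify quotas via transactions...
 *	xfs_qm_dqdetach(ip);	(at inode reclaim/unmount)
 */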
/*
 * This is called by VFS_SYNC and flags arg determines the caller,
 * and its motives, as done in xfs_sync.
 *
 * vfs_sync:     SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
 * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI  0x25
 * umountroot :  SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA
 */
int
xfs_qm_sync(
	xfs_mount_t	*mp,
	int		flags)
{
	int		recl, restarts;
	xfs_dquot_t	*dqp;
	uint		flush_flags;
	boolean_t	nowait;
	int		error;

	if (! XFS_IS_QUOTA_ON(mp))
		return 0;

	restarts = 0;
	/*
	 * We won't block unless we are asked to.
	 */
	nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);

 again:
	xfs_qm_mplist_lock(mp);
	/*
	 * dqpurge_all() also takes the mplist lock and iterate thru all dquots
	 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
	 * when we have the mplist lock, we know that dquots will be consistent
	 * as long as we have it locked.
	 */
	if (! XFS_IS_QUOTA_ON(mp)) {
		xfs_qm_mplist_unlock(mp);
		return 0;
	}
	FOREACH_DQUOT_IN_MP(dqp, mp) {
		/*
		 * If this is vfs_sync calling, then skip the dquots that
		 * don't 'seem' to be dirty. ie. don't acquire dqlock.
		 * This is very similar to what xfs_sync does with inodes.
		 */
		if (flags & SYNC_BDFLUSH) {
			if (! XFS_DQ_IS_DIRTY(dqp))
				continue;
		} else {
			/*
			 * Try to acquire the dquot lock. We are NOT out of
			 * lock order, but we just don't want to wait for this
			 * lock, unless somebody wanted us to.
			 */
			if (nowait) {
				if (! xfs_qm_dqlock_nowait(dqp))
					continue;
			} else {
				xfs_dqlock(dqp);
			}

			/*
			 * Now, find out for sure if this dquot is dirty or not.
			 */
			if (! XFS_DQ_IS_DIRTY(dqp)) {
				xfs_dqunlock(dqp);
				continue;
			}
		}

		/* XXX a sentinel would be better */
		recl = XFS_QI_MPLRECLAIMS(mp);
		if (! xfs_qm_dqflock_nowait(dqp)) {
			if (nowait) {
				xfs_dqunlock(dqp);
				continue;
			}
			/*
			 * If we can't grab the flush lock then if the caller
			 * really wanted us to give this our best shot, so
			 * see if we can give a push to the buffer before we wait
			 * on the flush lock. At this point, we know that
			 * even though the dquot is being flushed,
			 * it has (new) dirty data.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write
		 */
		flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
		xfs_qm_mplist_unlock(mp);
		xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
		error = xfs_qm_dqflush(dqp, flush_flags);
		xfs_dqunlock(dqp);
		if (error && XFS_FORCED_SHUTDOWN(mp))
			return 0;	/* Need to prevent umount failure */
		else if (error)
			return error;

		xfs_qm_mplist_lock(mp);
		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
				break;

			xfs_qm_mplist_unlock(mp);
			goto again;
		}
	}

	xfs_qm_mplist_unlock(mp);
	return 0;
}
/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Tell XQM that we exist as soon as possible.
	 */
	if ((error = xfs_qm_hold_quotafs_ref(mp))) {
		return error;
	}

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf, sizeof(xfs_quotainfo_t));
		mp->m_quotainfo = NULL;
		return error;
	}

	spin_lock_init(&qinf->qi_pinlock);
	xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
	qinf->qi_dqreclaims = 0;

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
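
	/*
	 * Worked example of the two precalculated constants above (a sketch,
	 * assuming 512-byte basic blocks, a 4 KiB filesystem block, and
	 * XFS_DQUOT_CLUSTER_SIZE_FSB of one block):
	 *
	 *	qi_dqchunklen = XFS_FSB_TO_BB(mp, 1)           = 8 BBs
	 *	qi_dqperchunk = BBTOB(8) / sizeof(xfs_dqblk_t) = 4096 / record
	 *
	 * i.e. one dquot cluster is one filesystem block, holding as many
	 * on-disk dquot records as fit in it.
	 */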
	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 */
	error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
			     XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			     (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
				XFS_DQ_PROJ),
			     XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
			     &dqp);
	if (! error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		/*
		 * We sent the XFS_QMOPT_DQSUSER flag to dqget because
		 * we don't want this dquot cached. We haven't done a
		 * quotacheck yet, and quotacheck doesn't like incore dquots.
		 */
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	return 0;
}
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);
	ASSERT(xfs_Gqm != NULL);

	/*
	 * Release the reference that XQM kept, so that we know
	 * when the XQM structure should be freed. We cannot assume
	 * that xfs_Gqm is non-null after this point.
	 */
	xfs_qm_rele_quotafs_ref(mp);

	spinlock_destroy(&qi->qi_pinlock);
	xfs_qm_list_destroy(&qi->qi_dqlist);

	if (qi->qi_uquotaip) {
		XFS_PURGE_INODE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		XFS_PURGE_INODE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi, sizeof(xfs_quotainfo_t));
	mp->m_quotainfo = NULL;
}
/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */

/* ARGSUSED */
STATIC void
xfs_qm_list_init(
	xfs_dqlist_t	*list,
	char		*str,
	int		n)
{
	mutex_init(&list->qh_lock);
	list->qh_next = NULL;
	list->qh_version = 0;
	list->qh_nelems = 0;
}

STATIC void
xfs_qm_list_destroy(
	xfs_dqlist_t	*list)
{
	mutex_destroy(&(list->qh_lock));
}
/*
 * Stripped down version of dqattach. This doesn't attach, or even look at the
 * dquots attached to the inode. The rationale is that there won't be any
 * attached at the time this is called from quotacheck.
 */
STATIC int
xfs_qm_dqget_noattach(
	xfs_inode_t	*ip,
	xfs_dquot_t	**O_udqpp,
	xfs_dquot_t	**O_gdqpp)
{
	int		error;
	xfs_mount_t	*mp;
	xfs_dquot_t	*udqp, *gdqp;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	mp = ip->i_mount;
	udqp = NULL;
	gdqp = NULL;

	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		/*
		 * We want the dquot allocated if it doesn't exist.
		 */
		if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
					 XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
					 &udqp))) {
			/*
			 * Shouldn't be able to turn off quotas here.
			 */
			ASSERT(error != ESRCH);
			ASSERT(error != ENOENT);
			return error;
		}
		ASSERT(udqp);
	}

	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		if (udqp)
			xfs_dqunlock(udqp);
		error = XFS_IS_GQUOTA_ON(mp) ?
				xfs_qm_dqget(mp, ip,
					     ip->i_d.di_gid, XFS_DQ_GROUP,
					     XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
					     &gdqp) :
				xfs_qm_dqget(mp, ip,
					     ip->i_d.di_projid, XFS_DQ_PROJ,
					     XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
					     &gdqp);
		if (error) {
			if (udqp)
				xfs_qm_dqrele(udqp);
			ASSERT(error != ESRCH);
			ASSERT(error != ENOENT);
			return error;
		}
		ASSERT(gdqp);

		/* Reacquire the locks in the right order */
		if (udqp) {
			if (! xfs_qm_dqlock_nowait(udqp)) {
				xfs_dqunlock(gdqp);
				xfs_dqlock(udqp);
				xfs_dqlock(gdqp);
			}
		}
	}

	*O_udqpp = udqp;
	*O_gdqpp = gdqp;

#ifdef QUOTADEBUG
	if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
	if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
#endif
	return 0;
}
/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
				   &xfs_zerocr, 0, 1, ip, &committed))) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

	/*
	 * Keep an extra reference to this quota inode. This inode is
	 * locked exclusively and joined to the transaction already.
	 */
	ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
	VN_HOLD(XFS_ITOV((*ip)));

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		unsigned oldv = mp->m_sb.sb_versionnum;
#endif
		ASSERT(!XFS_SB_VERSION_HASQUOTA(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		XFS_SB_VERSION_ADDQUOTA(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		cmn_err(CE_NOTE,
			"Old superblock version %x, converting to %x.",
			oldv, mp->m_sb.sb_versionnum);
#endif
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
		return error;
	}
	return 0;
}
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_disk_dquot_t	*ddq;
	int			j;

	xfs_buftrace("RESET DQUOTS", bp);
	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(XFS_QM_DQPERBLK(mp) == j);
#endif
	ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
	for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;
		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
	}
}
STATIC int
xfs_qm_dqiter_bufs(
	xfs_mount_t	*mp,
	xfs_dqid_t	firstid,
	xfs_fsblock_t	bno,
	xfs_filblks_t	blkcnt,
	uint		flags)
{
	xfs_buf_t	*bp;
	int		error;
	int		incr;
	int		type;

	ASSERT(blkcnt > 0);
	incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
		XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
		if (error)
			break;

		(void) xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_bdwrite(mp, bp);
		/*
		 * goto the next block.
		 */
		bno++;
		firstid += XFS_QM_DQPERBLK(mp);
	}
	return error;
}
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	xfs_mount_t	*mp,
	xfs_inode_t	*qip,
	uint		flags)
{
	xfs_bmbt_irec_t		*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi(NULL, qip, lblkno,
				  maxlblkcnt - lblkno,
				  XFS_BMAPI_METADATA,
				  NULL,
				  0, map, &nmaps, NULL, NULL);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				XFS_QM_DQPERBLK(mp);
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_baread(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       (int)XFS_QI_DQCHUNKLEN(mp));
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			if ((error = xfs_qm_dqiter_bufs(mp,
						       firstid,
						       map[i].br_startblock,
						       map[i].br_blockcount,
						       flags)))
				break;
		}

		if (error)
			break;
	} while (nmaps > 0);

	kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));
	return error;
}
/*
 * Called by dqusage_adjust in doing a quotacheck.
 * Given the inode, and a dquot (either USR or GRP, doesn't matter),
 * this updates its incore copy as well as the buffer copy. This is
 * so that once the quotacheck is done, we can just log all the buffers,
 * as opposed to logging numerous updates to individual dquots.
 */
STATIC void
xfs_qm_quotacheck_dqadjust(
	xfs_dquot_t		*dqp,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 */
	if (! XFS_IS_SUSER_DQUOT(dqp)) {
		xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
		xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
}
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	void		*private_data,	/* not used */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* not used */
	void		*dip,		/* on-disk inode pointer (not used) */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_dquot_t	*udqp, *gdqp;
	xfs_qcnt_t	nblks, rtblks;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	if (ip->i_d.di_mode == 0) {
		xfs_iput_new(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(ENOENT);
	}

	/*
	 * Obtain the locked dquots. In case of an error (eg. allocation
	 * fails for ENOSPC), we return the negative of the error number
	 * to bulkstat, so that it can get propagated to quotacheck() and
	 * making us disable quotas for the file system.
	 */
	if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
		xfs_iput(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_GIVEUP;
		return error;
	}

	rtblks = 0;
	if (! XFS_IS_REALTIME_INODE(ip)) {
		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
	} else {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
			xfs_iput(ip, XFS_ILOCK_EXCL);
			if (udqp)
				xfs_qm_dqput(udqp);
			if (gdqp)
				xfs_qm_dqput(gdqp);
			*res = BULKSTAT_RV_GIVEUP;
			return error;
		}
		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
	}
	ASSERT(ip->i_delayed_blks == 0);

	/*
	 * We can't release the inode while holding its dquot locks.
	 * The inode can go into inactive and might try to acquire the dquotlocks.
	 * So, just unlock here and do a vn_rele at the end.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(udqp);
		xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
		xfs_qm_dqput(udqp);
	}
	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(gdqp);
		xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
		xfs_qm_dqput(gdqp);
	}
	/*
	 * Now release the inode. This will send it to 'inactive', and
	 * possibly even free blocks.
	 */
	VN_RELE(XFS_ITOV(ip));

	/*
	 * Goto next inode.
	 */
	*res = BULKSTAT_RV_DIDONE;
	return 0;
}
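
/*
 * Contract of the bulkstat callback above, summarized (a sketch): the
 * callback reports its outcome through *res and the return value.
 *
 *	*res = BULKSTAT_RV_NOTHING;  skip this inode (quota inodes, freed)
 *	*res = BULKSTAT_RV_GIVEUP;   abort the scan, propagate the error
 *	*res = BULKSTAT_RV_DIDONE;   inode accounted successfully
 */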
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * There should be no cached dquots. The (simplistic) quotacheck
	 * algorithm doesn't like that.
	 */
	ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);

	cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if ((uip = XFS_QI_UQIP(mp))) {
		if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if ((gip = XFS_QI_GQIP(mp))) {
		if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		if ((error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust, NULL,
				     structsz, NULL, BULKSTAT_FG_IGET, &done)))
			break;
	} while (! done);

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
		goto error_return;
	}
	/*
	 * We've made all the changes that we need to make incore.
	 * Now flush_them down to disk buffers.
	 */
	xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);

	/*
	 * We didn't log anything, because if we crashed, we'll have to
	 * start the quotacheck from scratch anyway. However, we must make
	 * sure that our dquot changes are secure before we put the
	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
	 * flush.
	 */
	XFS_bflush(mp->m_ddev_targp);

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
	mp->m_qflags |= flags;

	XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");

 error_return:
	if (error) {
		cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
			"Disabling quotas.",
			mp->m_fsname, error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		ASSERT(xfs_Gqm != NULL);
		xfs_qm_destroy_quotainfo(mp);
		(void)xfs_mount_reset_sbqflags(mp);
	} else {
		cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
	}
	return error;
}
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip, 0)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip, 0))) {
				if (uip)
					VN_RELE(XFS_ITOV(uip));
				return XFS_ERROR(error);
			}
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				VN_RELE(XFS_ITOV(uip));

			return XFS_ERROR(error);
		}
	}

	XFS_QI_UQIP(mp) = uip;
	XFS_QI_GQIP(mp) = gip;

	return 0;
}
/*
 * Traverse the freelist of dquots and attempt to reclaim a maximum of
 * 'howmany' dquots. This operation races with dqlookup(), and attempts to
 * favor the lookup function ...
 * XXXsup merge this with qm_reclaim_one().
 */
STATIC int
xfs_qm_shake_freelist(
	int howmany)
{
	int		nreclaimed;
	xfs_dqhash_t	*hash;
	xfs_dquot_t	*dqp, *nextdqp;
	int		restarts;

	if (howmany <= 0)
		return 0;

	nreclaimed = 0;
	restarts = 0;

#ifdef QUOTADEBUG
	cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
#endif
	/* lock order is : hashchainlock, freelistlock, mplistlock */
 tryagain:
	xfs_qm_freelist_lock(xfs_Gqm);

	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
	     ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
	      nreclaimed < howmany); ) {
		xfs_dqlock(dqp);

		/*
		 * We are racing with dqlookup here. Naturally we don't
		 * want to reclaim a dquot that lookup wants.
		 */
		if (dqp->dq_flags & XFS_DQ_WANT) {
			xfs_dqunlock(dqp);
			xfs_qm_freelist_unlock(xfs_Gqm);
			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
				return nreclaimed;
			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
			goto tryagain;
		}

		/*
		 * If the dquot is inactive, we are assured that it is
		 * not on the mplist or the hashlist, and that makes our
		 * life easier.
		 */
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(dqp->HL_PREVP == NULL);
			ASSERT(dqp->MPL_PREVP == NULL);
			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
			nextdqp = dqp->dq_flnext;
			goto off_freelist;
		}

		ASSERT(dqp->MPL_PREVP);
		/*
		 * Try to grab the flush lock. If this dquot is in the process of
		 * getting flushed to disk, we don't want to reclaim it.
		 */
		if (! xfs_qm_dqflock_nowait(dqp)) {
			xfs_dqunlock(dqp);
			dqp = dqp->dq_flnext;
			continue;
		}

		/*
		 * We have the flush lock so we know that this is not in the
		 * process of being flushed. So, if this is dirty, flush it
		 * DELWRI so that we don't get a freelist infested with
		 * dirty dquots.
		 */
		if (XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
			/*
			 * We flush it delayed write, so don't bother
			 * releasing the mplock.
			 */
			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
			dqp = dqp->dq_flnext;
			continue;
		}
		/*
		 * We're trying to get the hashlock out of order. This races
		 * with dqlookup; so, we giveup and goto the next dquot if
		 * we couldn't get the hashlock. This way, we won't starve
		 * a dqlookup process that holds the hashlock that is
		 * waiting for the freelist lock.
		 */
		if (! xfs_qm_dqhashlock_nowait(dqp)) {
			xfs_dqfunlock(dqp);
			xfs_dqunlock(dqp);
			dqp = dqp->dq_flnext;
			continue;
		}
		/*
		 * This races with dquot allocation code as well as dqflush_all
		 * and reclaim code. So, if we failed to grab the mplist lock,
		 * giveup everything and start over.
		 */
		hash = dqp->q_hash;
		ASSERT(hash);
		if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
			/* XXX put a sentinel so that we can come back here */
			xfs_dqfunlock(dqp);
			xfs_dqunlock(dqp);
			XFS_DQ_HASH_UNLOCK(hash);
			xfs_qm_freelist_unlock(xfs_Gqm);
			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
				return nreclaimed;
			goto tryagain;
		}

		xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
#ifdef QUOTADEBUG
		cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
			dqp, be32_to_cpu(dqp->q_core.d_id));
#endif
		ASSERT(dqp->q_nrefs == 0);
		nextdqp = dqp->dq_flnext;
		XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
		XQM_HASHLIST_REMOVE(hash, dqp);
		xfs_dqfunlock(dqp);
		xfs_qm_mplist_unlock(dqp->q_mount);
		XFS_DQ_HASH_UNLOCK(hash);

 off_freelist:
		XQM_FREELIST_REMOVE(dqp);
		xfs_dqunlock(dqp);
		nreclaimed++;
		XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
		xfs_qm_dqdestroy(dqp);
		dqp = nextdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);
	return nreclaimed;
}
/*
 * The kmem_shake interface is invoked when memory is running low.
 */
/* ARGSUSED */
STATIC int
xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
{
	int	ndqused, nfree, n;

	if (!kmem_shake_allow(gfp_mask))
		return 0;
	if (!xfs_Gqm)
		return 0;

	nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
	/* incore dquots in all f/s's */
	ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;

	ASSERT(ndqused >= 0);

	if (nfree <= ndqused && nfree < ndquot)
		return 0;

	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
	n = nfree - ndqused - ndquot;		/* # over target */

	return xfs_qm_shake_freelist(MAX(nfree, n));
}
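
/*
 * Worked example of the target arithmetic above (a sketch with made-up
 * numbers): with qm_dqfree_ratio == 2, ndquot == 500, 1000 dquots in core
 * and 800 of them free, ndqused = 200, target free count = 400, and
 * n = 800 - 400 - 500 = -100, so MAX(nfree, n) asks the freelist shaker
 * for up to 800 reclaims.  The shaker itself stops once it has reclaimed
 * 'howmany' dquots or runs out of eligible freelist entries.
 */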
/*
 * Just pop the least recently used dquot off the freelist and
 * recycle it. The returned dquot is locked.
 */
STATIC xfs_dquot_t *
xfs_qm_dqreclaim_one(void)
{
	xfs_dquot_t	*dqpout;
	xfs_dquot_t	*dqp;
	int		restarts;

	restarts = 0;
	dqpout = NULL;

	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
 startagain:
	xfs_qm_freelist_lock(xfs_Gqm);

	FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
		xfs_dqlock(dqp);

		/*
		 * We are racing with dqlookup here. Naturally we don't
		 * want to reclaim a dquot that lookup wants. We release the
		 * freelist lock and start over, so that lookup will grab
		 * both the dquot and the freelistlock.
		 */
		if (dqp->dq_flags & XFS_DQ_WANT) {
			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
			xfs_dqunlock(dqp);
			xfs_qm_freelist_unlock(xfs_Gqm);
			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
				return NULL;
			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
			goto startagain;
		}

		/*
		 * If the dquot is inactive, we are assured that it is
		 * not on the mplist or the hashlist, and that makes our
		 * life easier.
		 */
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(dqp->HL_PREVP == NULL);
			ASSERT(dqp->MPL_PREVP == NULL);
			XQM_FREELIST_REMOVE(dqp);
			xfs_dqunlock(dqp);
			dqpout = dqp;
			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
			break;
		}

		ASSERT(dqp->q_hash);
		ASSERT(dqp->MPL_PREVP);

		/*
		 * Try to grab the flush lock. If this dquot is in the process of
		 * getting flushed to disk, we don't want to reclaim it.
		 */
		if (! xfs_qm_dqflock_nowait(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}

		/*
		 * We have the flush lock so we know that this is not in the
		 * process of being flushed. So, if this is dirty, flush it
		 * DELWRI so that we don't get a freelist infested with
		 * dirty dquots.
		 */
		if (XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
			/*
			 * We flush it delayed write, so don't bother
			 * releasing the freelist lock.
			 */
			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
			continue;
		}

		if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
			xfs_dqfunlock(dqp);
			xfs_dqunlock(dqp);
			continue;
		}

		if (! xfs_qm_dqhashlock_nowait(dqp))
			goto mplistunlock;

		ASSERT(dqp->q_nrefs == 0);
		xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
		XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
		XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
		XQM_FREELIST_REMOVE(dqp);
		dqpout = dqp;
		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
 mplistunlock:
		xfs_qm_mplist_unlock(dqp->q_mount);
		xfs_dqfunlock(dqp);
		xfs_dqunlock(dqp);
		if (dqpout)
			break;
	}

	xfs_qm_freelist_unlock(xfs_Gqm);
	return dqpout;
}
/*------------------------------------------------------------------*/

/*
 * Return a new incore dquot. Depending on the number of
 * dquots in the system, we either allocate a new one on the kernel heap,
 * or reclaim a free one.
 * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
 * to reclaim an existing one from the freelist.
 */
boolean_t
xfs_qm_dqalloc_incore(
	xfs_dquot_t **O_dqpp)
{
	xfs_dquot_t	*dqp;

	/*
	 * Check against high water mark to see if we want to pop
	 * a nincompoop dquot off the freelist.
	 */
	if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
		/*
		 * Try to recycle a dquot from the freelist.
		 */
		if ((dqp = xfs_qm_dqreclaim_one())) {
			XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
			/*
			 * Just zero the core here. The rest will get
			 * reinitialized by caller. XXX we shouldn't even
			 * do this zero ...
			 */
			memset(&dqp->q_core, 0, sizeof(dqp->q_core));
			*O_dqpp = dqp;
			return B_FALSE;
		}
		XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
	}

	/*
	 * Allocate a brand new dquot on the kernel heap and return it
	 * to the caller to initialize.
	 */
	ASSERT(xfs_Gqm->qm_dqzone != NULL);
	*O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
	atomic_inc(&xfs_Gqm->qm_totaldquots);

	return B_TRUE;
}
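
/*
 * Illustrative caller pattern (a sketch; the dquot initialization path in
 * dqget is the real consumer): the boolean return only tells the caller
 * whether the memory is brand new or recycled, the setup is the same.
 *
 *	xfs_dquot_t *dqp;
 *
 *	if (xfs_qm_dqalloc_incore(&dqp) == B_FALSE)
 *		;	recycled: q_core was already zeroed above
 *	...fill in dqp->q_core.d_id, type flags, list links, etc...
 */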
/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

#ifdef QUOTADEBUG
	cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
#endif
	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	if ((error = xfs_trans_reserve(tp, 0,
				      mp->m_sb.sb_sectsize + 128, 0,
				      0,
				      XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	(void) xfs_trans_commit(tp, 0);

	return 0;
}
/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid and gid (from cred_t) make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	uid_t		uid,
	gid_t		gid,
	prid_t		prid,
	uint		flags,
	xfs_dquot_t	**O_udqpp,
	xfs_dquot_t	**O_gdqpp)
{
	int		error;
	xfs_dquot_t	*uq, *gq;
	uint		lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
					    XFS_QMOPT_ILOCKED))) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = ip->i_udquot;
			xfs_dqlock(uq);
			XFS_DQHOLD(uq);
			xfs_dqunlock(uq);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (ip->i_d.di_projid != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	}
	if (uq)
		xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}
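
/*
 * Illustrative caller (a sketch of the create/chown vnodeops flow, not
 * original code): reserve against the returned dquots in a transaction,
 * then drop the references this routine took.
 *
 *	xfs_dquot_t *udqp, *gdqp;
 *
 *	error = xfs_qm_vop_dqalloc(mp, ip, uid, gid, prid,
 *				   XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA,
 *				   &udqp, &gdqp);
 *	...charge a transaction against udqp/gdqp...
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 */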
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode
	 * is going to keep this dquot pointer even
	 * after the trans_commit.
	 */
	xfs_dqlock(newdq);
	XFS_DQHOLD(newdq);
	xfs_dqunlock(newdq);
	*IO_olddq = newdq;

	return prevdq;
}
/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp,
	uint		flags)
{
	int		error;
	xfs_mount_t	*mp;
	uint		delblks, blkflags, prjflags = 0;
	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;

	ASSERT(XFS_ISLOCKED_INODE(ip));
	mp = ip->i_mount;
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
		delblksudq = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			unresudq = ip->i_udquot;
		}
	}
	if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
		if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
		    ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))
			prjflags = XFS_QMOPT_ENOSPC;

		if (prjflags ||
		    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
		     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
			delblksgdq = gdqp;
			if (delblks) {
				ASSERT(ip->i_gdquot);
				unresgdq = ip->i_gdquot;
			}
		}
	}

	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags)))
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(delblksudq || delblksgdq);
		ASSERT(unresudq || unresgdq);
		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
				flags | blkflags | prjflags)))
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
				blkflags);
	}

	return 0;
}
int
xfs_qm_vop_rename_dqattach(
	xfs_inode_t	**i_tab)
{
	xfs_inode_t	*ip;
	int		i;
	int		error;

	ip = i_tab[0];

	if (! XFS_IS_QUOTA_ON(ip->i_mount))
		return 0;

	if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;
	}
	for (i = 1; (i < 4 && i_tab[i]); i++) {
		/*
		 * Watch out for duplicate entries in the table.
		 */
		if ((ip = i_tab[i]) != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
void
xfs_qm_vop_dqattach_and_dqmod_newinode(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp)
{
	if (!XFS_IS_QUOTA_ON(tp->t_mountp))
		return;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));

	if (udqp) {
		xfs_dqlock(udqp);
		XFS_DQHOLD(udqp);
		xfs_dqunlock(udqp);
		ASSERT(ip->i_udquot == NULL);
		ip->i_udquot = udqp;
		ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		xfs_dqlock(gdqp);
		XFS_DQHOLD(gdqp);
		xfs_dqunlock(gdqp);
		ASSERT(ip->i_gdquot == NULL);
		ip->i_gdquot = gdqp;
		ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
		ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
			ip->i_d.di_gid : ip->i_d.di_projid) ==
				be32_to_cpu(gdqp->q_core.d_id));
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
/* ------------- list stuff -----------------*/

STATIC void
xfs_qm_freelist_init(xfs_frlist_t *ql)
{
	ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
	mutex_init(&ql->qh_lock);
	ql->qh_version = 0;
	ql->qh_nelems = 0;
}

STATIC void
xfs_qm_freelist_destroy(xfs_frlist_t *ql)
{
	xfs_dquot_t	*dqp, *nextdqp;

	mutex_lock(&ql->qh_lock);
	for (dqp = ql->qh_next;
	     dqp != (xfs_dquot_t *)ql; ) {
		xfs_dqlock(dqp);
		nextdqp = dqp->dq_flnext;
#ifdef QUOTADEBUG
		cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
#endif
		XQM_FREELIST_REMOVE(dqp);
		xfs_dqunlock(dqp);
		xfs_qm_dqdestroy(dqp);
		dqp = nextdqp;
	}
	mutex_unlock(&ql->qh_lock);
	mutex_destroy(&ql->qh_lock);

	ASSERT(ql->qh_nelems == 0);
}

STATIC void
xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
{
	dq->dq_flnext = ql->qh_next;
	dq->dq_flprev = (xfs_dquot_t *)ql;
	ql->qh_next = dq;
	dq->dq_flnext->dq_flprev = dq;
	xfs_Gqm->qm_dqfreelist.qh_nelems++;
	xfs_Gqm->qm_dqfreelist.qh_version++;
}
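
/*
 * Shape of the freelist (a sketch): the head xfs_frlist_t doubles as a
 * sentinel node, so the list is circular and traversal never tests for
 * NULL, only for returning to the head.
 *
 *	head <-> dqA <-> dqB <-> ... <-> head
 *
 * Insert above splices a dquot in right after the head; unlink below points
 * the neighbours at each other and leaves the dquot self-linked, which is
 * also how "not on the freelist" is represented.
 */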
STATIC void
xfs_qm_freelist_unlink(xfs_dquot_t *dq)
{
	xfs_dquot_t *next = dq->dq_flnext;
	xfs_dquot_t *prev = dq->dq_flprev;

	next->dq_flprev = prev;
	prev->dq_flnext = next;
	dq->dq_flnext = dq->dq_flprev = dq;
	xfs_Gqm->qm_dqfreelist.qh_nelems--;
	xfs_Gqm->qm_dqfreelist.qh_version++;
}

void
xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
{
	xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
}

STATIC int
xfs_qm_dqhashlock_nowait(
	xfs_dquot_t *dqp)
{
	int locked;

	locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
	return locked;
}

int
xfs_qm_freelist_lock_nowait(
	xfs_qm_t *xqm)
{
	int locked;

	locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
	return locked;
}

STATIC int
xfs_qm_mplist_nowait(
	xfs_mount_t	*mp)
{
	int locked;

	ASSERT(mp->m_quotainfo);
	locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
	return locked;
}