/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * chains of dquots.
 */
struct xfs_qm	*xfs_Gqm;

kmem_zone_t	*qm_dqzone;
kmem_zone_t	*qm_dqtrxzone;

static cred_t	xfs_zerocr;
STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);

STATIC void	xfs_qm_freelist_init(xfs_frlist_t *);
STATIC void	xfs_qm_freelist_destroy(xfs_frlist_t *);
STATIC int	xfs_qm_mplist_nowait(xfs_mount_t *);
STATIC int	xfs_qm_dqhashlock_nowait(xfs_dquot_t *);

STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int	xfs_qm_shake(int, gfp_t);
static struct shrinker xfs_qm_shaker = {
	.shrink = xfs_qm_shake,
	.seeks = DEFAULT_SEEKS,
};

extern mutex_t	qcheck_lock;
#ifdef QUOTADEBUG
#define XQM_LIST_PRINT(l, NXT, title) \
{ \
	xfs_dquot_t	*dqp; int i = 0; \
	cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
	for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
		cmn_err(CE_DEBUG, "   %d. \"%d (%s)\" " \
			"bcnt = %d, icnt = %d, refs = %d", \
			++i, (int) be32_to_cpu(dqp->q_core.d_id), \
			DQFLAGTO_TYPESTR(dqp), \
			(int) be64_to_cpu(dqp->q_core.d_bcount), \
			(int) be64_to_cpu(dqp->q_core.d_icount), \
			(int) dqp->q_nrefs); } \
}
#else
#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
#endif
/*
 * Initialize the XQM structure.
 * Note that there is not one quota manager per file system.
 */
STATIC struct xfs_qm *
xfs_Gqm_init(void)
{
	xfs_dqhash_t	*udqhash, *gdqhash;
	xfs_qm_t	*xqm;
	size_t		hsize;
	uint		i;
	/*
	 * Initialize the dquot hash tables.
	 */
	udqhash = kmem_zalloc_greedy(&hsize,
				     XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
				     XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
				     KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
	hsize /= sizeof(xfs_dqhash_t);
	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
	xqm->qm_dqhashmask = hsize - 1;
	xqm->qm_usr_dqhtable = udqhash;
	xqm->qm_grp_dqhtable = gdqhash;
	ASSERT(xqm->qm_usr_dqhtable != NULL);
	ASSERT(xqm->qm_grp_dqhtable != NULL);

	for (i = 0; i < hsize; i++) {
		xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
		xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
	}
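	/*
	 * Illustrative sketch (not code from this file): because
	 * qm_dqhashmask is hsize - 1, bucket selection can be a simple
	 * mask instead of a modulo, which only works when the greedy
	 * allocation above returned a power-of-two number of entries.
	 * The helper name below is hypothetical:
	 *
	 *	xfs_dqhash_t *bucket(xfs_qm_t *q, xfs_dqid_t id)
	 *	{
	 *		return &q->qm_usr_dqhtable[id & q->qm_dqhashmask];
	 *	}
	 */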
	/*
	 * Freelist of all dquots of all file systems
	 */
	xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
	/*
	 * dquot zone. we register our own low-memory callback.
	 */
	if (!qm_dqzone) {
		xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
						"xfs_dquots");
		qm_dqzone = xqm->qm_dqzone;
	} else
		xqm->qm_dqzone = qm_dqzone;

	register_shrinker(&xfs_qm_shaker);
	/*
	 * The t_dqinfo portion of transactions.
	 */
	if (!qm_dqtrxzone) {
		xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
						   "xfs_dqtrx");
		qm_dqtrxzone = xqm->qm_dqtrxzone;
	} else
		xqm->qm_dqtrxzone = qm_dqtrxzone;
	atomic_set(&xqm->qm_totaldquots, 0);
	xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
	xqm->qm_nrefs = 0;
	mutex_init(&qcheck_lock);
	return xqm;
}
/*
 * Destroy the global quota manager when its reference count goes to zero.
 */
STATIC void
xfs_qm_destroy(
	struct xfs_qm	*xqm)
{
	int		hsize, i;

	ASSERT(xqm != NULL);
	ASSERT(xqm->qm_nrefs == 0);
	unregister_shrinker(&xfs_qm_shaker);
	hsize = xqm->qm_dqhashmask + 1;
	for (i = 0; i < hsize; i++) {
		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
		xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
	}
	kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t));
	kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t));
	xqm->qm_usr_dqhtable = NULL;
	xqm->qm_grp_dqhtable = NULL;
	xqm->qm_dqhashmask = 0;
	xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
	mutex_destroy(&qcheck_lock);
	kmem_free(xqm, sizeof(xfs_qm_t));
}
/*
 * Called at mount time to let XQM know that another file system is
 * starting quotas. This isn't crucial information as the individual mount
 * structures are pretty independent, but it helps the XQM keep a
 * global view of what's going on.
 */
int
xfs_qm_hold_quotafs_ref(
	struct xfs_mount *mp)
{
	/*
	 * Need to lock the xfs_Gqm structure for things like this. For example,
	 * the structure could disappear between the entry to this routine and
	 * a HOLD operation if not locked.
	 */
	XFS_QM_LOCK(xfs_Gqm);

	if (xfs_Gqm == NULL)
		xfs_Gqm = xfs_Gqm_init();
	/*
	 * We can keep a list of all filesystems with quotas mounted for
	 * debugging and statistical purposes, but ...
	 * Just take a reference and get out.
	 */
	XFS_QM_HOLD(xfs_Gqm);
	XFS_QM_UNLOCK(xfs_Gqm);

	return 0;
}
/*
 * Release the reference that a filesystem took at mount time,
 * so that we know when we need to destroy the entire quota manager.
 */
STATIC void
xfs_qm_rele_quotafs_ref(
	struct xfs_mount *mp)
{
	xfs_dquot_t	*dqp, *nextdqp;

	ASSERT(xfs_Gqm);
	ASSERT(xfs_Gqm->qm_nrefs > 0);

	/*
	 * Go thru the freelist and destroy all inactive dquots.
	 */
	xfs_qm_freelist_lock(xfs_Gqm);

	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
	     dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
		xfs_dqlock(dqp);
		nextdqp = dqp->dq_flnext;
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(dqp->HL_PREVP == NULL);
			ASSERT(dqp->MPL_PREVP == NULL);
			XQM_FREELIST_REMOVE(dqp);
			xfs_dqunlock(dqp);
			xfs_qm_dqdestroy(dqp);
		} else {
			xfs_dqunlock(dqp);
		}
		dqp = nextdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);
	/*
	 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
	 * be restarted.
	 */
	XFS_QM_LOCK(xfs_Gqm);
	XFS_QM_RELE(xfs_Gqm);
	if (xfs_Gqm->qm_nrefs == 0) {
		xfs_qm_destroy(xfs_Gqm);
		xfs_Gqm = NULL;
	}
	XFS_QM_UNLOCK(xfs_Gqm);
}
/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount_quotadestroy(
	xfs_mount_t	*mp)
{
	if (XFS_IS_QUOTA_RUNNING(mp))
		xfs_qm_destroy_quotainfo(mp);
}
/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 */
int
xfs_qm_mount_quotas(
	xfs_mount_t	*mp,
	int		mfsi_flags)
{
	unsigned long	s;
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		cmn_err(CE_NOTE,
			"Cannot turn on quotas for realtime filesystem %s",
			mp->m_fsname);
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	if ((error = xfs_qm_init_quotainfo(mp))) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp) &&
	    !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
		if ((error = xfs_qm_quotacheck(mp))) {
			/* Quotacheck has failed and quotas have
			 * been disabled.
			 */
			return XFS_ERROR(error);
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp)) {
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	}
	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) {
		mp->m_qflags &= ~XFS_OQUOTA_CHKD;
	}

 write_changes:
	/*
	 * We actually don't have to acquire the SB_LOCK at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	s = XFS_SB_LOCK(mp);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	XFS_SB_UNLOCK(mp, s);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_fs_cmn_err(CE_ALERT, mp,
				"XFS mount_quotas: Superblock update failed!");
		}
	}

	if (error) {
		xfs_fs_cmn_err(CE_WARN, mp,
			"Failed to initialize disk quotas.");
	}
	return XFS_ERROR(error);
}
/*
 * Called from the vfsops layer.
 */
int
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uqp, *gqp;
	int		error = 0;

	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Flush out the quota inodes.
	 */
	uqp = gqp = NULL;
	if (mp->m_quotainfo) {
		if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) {
			xfs_ilock(uqp, XFS_ILOCK_EXCL);
			xfs_iflock(uqp);
			error = xfs_iflush(uqp, XFS_IFLUSH_SYNC);
			xfs_iunlock(uqp, XFS_ILOCK_EXCL);
			if (unlikely(error == EFSCORRUPTED)) {
				XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)",
						 XFS_ERRLEVEL_LOW, mp);
				goto out;
			}
		}
		if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) {
			xfs_ilock(gqp, XFS_ILOCK_EXCL);
			xfs_iflock(gqp);
			error = xfs_iflush(gqp, XFS_IFLUSH_SYNC);
			xfs_iunlock(gqp, XFS_ILOCK_EXCL);
			if (unlikely(error == EFSCORRUPTED)) {
				XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)",
						 XFS_ERRLEVEL_LOW, mp);
				goto out;
			}
		}
	}
	if (uqp) {
		XFS_PURGE_INODE(uqp);
		mp->m_quotainfo->qi_uquotaip = NULL;
	}
	if (gqp) {
		XFS_PURGE_INODE(gqp);
		mp->m_quotainfo->qi_gquotaip = NULL;
	}
 out:
	return XFS_ERROR(error);
}
/*
 * Flush all dquots of the given file system to disk. The dquots are
 * _not_ purged from memory here, just their data written to disk.
 */
STATIC int
xfs_qm_dqflush_all(
	xfs_mount_t	*mp,
	int		flags)
{
	int		recl;
	xfs_dquot_t	*dqp;
	int		error;

	if (mp->m_quotainfo == NULL)
		return 0;
 again:
	xfs_qm_mplist_lock(mp);
	FOREACH_DQUOT_IN_MP(dqp, mp) {
		xfs_dqlock(dqp);
		if (! XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}
		xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
		/* XXX a sentinel would be better */
		recl = XFS_QI_MPLRECLAIMS(mp);
		if (! xfs_qm_dqflock_nowait(dqp)) {
			/*
			 * If we can't grab the flush lock then check
			 * to see if the dquot has been flushed delayed
			 * write. If so, grab its buffer and send it
			 * out immediately. We'll be able to acquire
			 * the flush lock when the I/O completes.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write.
		 */
		xfs_qm_mplist_unlock(mp);
		error = xfs_qm_dqflush(dqp, flags);
		xfs_dqunlock(dqp);
		if (error)
			return error;

		xfs_qm_mplist_lock(mp);
		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
			xfs_qm_mplist_unlock(mp);
			/* XXX restart limit */
			goto again;
		}
	}

	xfs_qm_mplist_unlock(mp);
	return 0;
}
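/*
 * A sketch of the retry protocol used above (and repeated by several
 * other list walkers in this file): XFS_QI_MPLRECLAIMS(mp) behaves as a
 * generation counter that is bumped whenever a dquot is unhooked from
 * the mplist. Any walker that must drop the mplist lock across a disk
 * write snapshots the counter first and restarts if it moved:
 *
 *	again:
 *		mplist_lock();
 *		for each dqp:
 *			recl = generation;
 *			mplist_unlock();	cannot hold it across I/O
 *			flush(dqp);
 *			mplist_lock();
 *			if (recl != generation)
 *				goto again;	list changed under us
 *		mplist_unlock();
 *
 * The names here are shorthand for the locked helpers above, not a
 * separate API.
 */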
/*
 * Release the group dquot pointers the user dquots may be
 * carrying around as a hint. mplist is locked on entry and exit.
 */
STATIC void
xfs_qm_detach_gdquots(
	xfs_mount_t	*mp)
{
	xfs_dquot_t	*dqp, *gdqp;
	int		nrecl;

 again:
	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	dqp = XFS_QI_MPLNEXT(mp);
	while (dqp) {
		xfs_dqlock(dqp);
		if ((gdqp = dqp->q_gdquot)) {
			xfs_dqlock(gdqp);
			dqp->q_gdquot = NULL;
		}
		xfs_dqunlock(dqp);

		if (gdqp) {
			/*
			 * Can't hold the mplist lock across a dqput.
			 * XXXmust convert to marker based iterations here.
			 */
			nrecl = XFS_QI_MPLRECLAIMS(mp);
			xfs_qm_mplist_unlock(mp);
			xfs_qm_dqput(gdqp);

			xfs_qm_mplist_lock(mp);
			if (nrecl != XFS_QI_MPLRECLAIMS(mp))
				goto again;
		}
		dqp = dqp->MPL_NEXT;
	}
}
/*
 * Go through all the incore dquots of this file system and take them
 * off the mplist and hashlist, if the dquot type matches the dqtype
 * parameter. This is used when turning off quota accounting for
 * users and/or groups, as well as when the filesystem is unmounting.
 */
STATIC int
xfs_qm_dqpurge_int(
	xfs_mount_t	*mp,
	uint		flags)	/* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
{
	xfs_dquot_t	*dqp;
	uint		dqtype;
	int		nrecl;
	xfs_dquot_t	*nextdqp;
	int		nmisses;

	if (mp->m_quotainfo == NULL)
		return 0;

	dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
	dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
	dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;

	xfs_qm_mplist_lock(mp);

	/*
	 * In the first pass through all incore dquots of this filesystem,
	 * we release the group dquot pointers the user dquots may be
	 * carrying around as a hint. We need to do this irrespective of
	 * what's being turned off.
	 */
	xfs_qm_detach_gdquots(mp);

 again:
	nmisses = 0;
	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	/*
	 * Try to get rid of all of the unwanted dquots. The idea is to
	 * get them off mplist and hashlist, but leave them on freelist.
	 */
	dqp = XFS_QI_MPLNEXT(mp);
	while (dqp) {
		/*
		 * It's OK to look at the type without taking dqlock here.
		 * We're holding the mplist lock here, and that's needed for
		 * a mplist unlink.
		 */
		if ((dqp->dq_flags & dqtype) == 0) {
			dqp = dqp->MPL_NEXT;
			continue;
		}

		if (! xfs_qm_dqhashlock_nowait(dqp)) {
			nrecl = XFS_QI_MPLRECLAIMS(mp);
			xfs_qm_mplist_unlock(mp);
			XFS_DQ_HASH_LOCK(dqp->q_hash);
			xfs_qm_mplist_lock(mp);

			/*
			 * XXXTheoretically, we can get into a very long
			 * ping pong game here.
			 * No one can be adding dquots to the mplist at
			 * this point, but somebody might be taking things off.
			 */
			if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
				XFS_DQ_HASH_UNLOCK(dqp->q_hash);
				goto again;
			}
		}

		/*
		 * Take the dquot off the mplist and hashlist. It may remain on
		 * freelist in INACTIVE state.
		 */
		nextdqp = dqp->MPL_NEXT;
		nmisses += xfs_qm_dqpurge(dqp, flags);
		dqp = nextdqp;
	}
	xfs_qm_mplist_unlock(mp);
	return nmisses;
}
int
xfs_qm_dqpurge_all(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		ndquots;

	/*
	 * Purge the dquot cache.
	 * None of the dquots should really be busy at this point.
	 */
	if (mp->m_quotainfo) {
		while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
			delay(ndquots * 10);
		}
	}
	return 0;
}
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	uint		dolock,
	xfs_dquot_t	*udqhint,	/* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	error = 0;
	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	if ((dqp = *IO_idqpp)) {
		if (dolock)
			xfs_dqlock(dqp);
		xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
		goto done;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
	if (udqhint && !dolock)
		xfs_dqlock(udqhint);

	/*
	 * No need to take dqlock to look at the id.
	 * The ID can't change until it gets reclaimed, and it won't
	 * be reclaimed as long as we have a ref from inode and we hold
	 * the ilock.
	 */
	if (udqhint &&
	    (dqp = udqhint->q_gdquot) &&
	    (be32_to_cpu(dqp->q_core.d_id) == id)) {
		ASSERT(XFS_DQ_IS_LOCKED(udqhint));
		xfs_dqlock(dqp);
		XFS_DQHOLD(dqp);
		ASSERT(*IO_idqpp == NULL);
		*IO_idqpp = dqp;
		if (!dolock) {
			xfs_dqunlock(dqp);
			xfs_dqunlock(udqhint);
		}
		goto done;
	}
	/*
	 * We can't hold a dquot lock when we call the dqget code.
	 * We'll deadlock in no time, because of (not conforming to)
	 * lock ordering - the inodelock comes before any dquot lock,
	 * and we may drop and reacquire the ilock in xfs_qm_dqget().
	 */
	if (udqhint)
		xfs_dqunlock(udqhint);
	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
				  doalloc|XFS_QMOPT_DOWARN, &dqp))) {
		if (udqhint && dolock)
			xfs_dqlock(udqhint);
		goto done;
	}

	xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	ASSERT(dqp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (! dolock) {
		xfs_dqunlock(dqp);
		goto done;
	}
	if (! udqhint)
		goto done;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (! xfs_qm_dqlock_nowait(udqhint)) {
		xfs_dqunlock(dqp);
		xfs_dqlock(udqhint);
		xfs_dqlock(dqp);
	}
	ASSERT(XFS_DQ_IS_LOCKED(udqhint));
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

 done:
	return error;
}
/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups. The idea sounds simple, but the
 * execution isn't, because the udquot might have a group dquot attached
 * already and getting rid of that gets us into lock ordering constraints.
 * The process is complicated more by the fact that the dquots may or may not
 * be locked on entry.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq,
	uint		locked)
{
	xfs_dquot_t	*tmp;

	if (locked) {
		ASSERT(XFS_DQ_IS_LOCKED(udq));
		ASSERT(XFS_DQ_IS_LOCKED(gdq));
	} else
		xfs_dqlock(udq);

	if ((tmp = udq->q_gdquot)) {
		if (tmp == gdq) {
			if (! locked)
				xfs_dqunlock(udq);
			return;
		}

		udq->q_gdquot = NULL;
		/*
		 * We can't keep any dqlocks when calling dqrele,
		 * because the freelist lock comes before dqlocks.
		 */
		xfs_dqunlock(udq);
		if (locked)
			xfs_dqunlock(gdq);
		/*
		 * we took a hard reference once upon a time in dqget,
		 * so give it back when the udquot no longer points at it
		 * dqput() does the unlocking of the dquot.
		 */
		xfs_qm_dqrele(tmp);

		xfs_dqlock(udq);
		xfs_dqlock(gdq);
	} else {
		ASSERT(XFS_DQ_IS_LOCKED(udq));
		if (! locked)
			xfs_dqlock(gdq);
	}

	ASSERT(XFS_DQ_IS_LOCKED(udq));
	ASSERT(XFS_DQ_IS_LOCKED(gdq));
	/*
	 * Somebody could have attached a gdquot here,
	 * when we dropped the uqlock. If so, just do nothing.
	 */
	if (udq->q_gdquot == NULL) {
		XFS_DQHOLD(gdq);
		udq->q_gdquot = gdq;
	}
	if (! locked) {
		xfs_dqunlock(gdq);
		xfs_dqunlock(udq);
	}
}
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
 * much made this code a complete mess, but it has been pretty useful.
 * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if ((! XFS_IS_QUOTA_ON(mp)) ||
	    (! XFS_NOT_DQATTACHED(mp, ip)) ||
	    (ip->i_ino == mp->m_sb.sb_uquotino) ||
	    (ip->i_ino == mp->m_sb.sb_gquotino))
		return 0;

	ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
	       XFS_ISLOCKED_INODE_EXCL(ip));

	if (! (flags & XFS_QMOPT_ILOCKED))
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
					    flags & XFS_QMOPT_DQALLOC,
					    flags & XFS_QMOPT_DQLOCK,
					    NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}
	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
					    flags & XFS_QMOPT_DQALLOC,
					    flags & XFS_QMOPT_DQLOCK,
					    ip->i_udquot, &ip->i_gdquot) :
			xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
					    flags & XFS_QMOPT_DQALLOC,
					    flags & XFS_QMOPT_DQLOCK,
					    ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We may or may not have the i_udquot locked at this point,
		 * but this check is OK since we don't depend on the i_gdquot to
		 * be accurate 100% all the time. It is just a hint, and this
		 * will succeed in general.
		 */
		if (ip->i_udquot->q_gdquot == ip->i_gdquot)
			goto done;
		/*
		 * Attach i_gdquot to the gdquot hint inside the i_udquot.
		 */
		xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
					  flags & XFS_QMOPT_DQLOCK);
	}

 done:
#ifdef QUOTADEBUG
	if (! error) {
		if (ip->i_udquot) {
			if (flags & XFS_QMOPT_DQLOCK)
				ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
		}
		if (ip->i_gdquot) {
			if (flags & XFS_QMOPT_DQLOCK)
				ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
		}
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_OQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
#endif

	if (! (flags & XFS_QMOPT_ILOCKED))
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	else
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));

	return error;
}
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this's called by
 * ddelete().
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot))
		return;

	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
	if (ip->i_udquot) {
		xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
}
/*
 * This is called by VFS_SYNC and flags arg determines the caller,
 * and its motives, as done in xfs_sync.
 *
 * vfs_sync:	 SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
 * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI  0x25
 * umountroot:	 SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA
 */
int
xfs_qm_sync(
	xfs_mount_t	*mp,
	int		flags)
{
	int		recl, restarts;
	xfs_dquot_t	*dqp;
	uint		flush_flags;
	boolean_t	nowait;
	int		error;

	if (! XFS_IS_QUOTA_ON(mp))
		return 0;

	restarts = 0;
	/*
	 * We won't block unless we are asked to.
	 */
	nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
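	/*
	 * Worked example of the nowait computation, using the caller
	 * flag combinations listed in the comment above (assuming the
	 * usual SYNC_* bit meanings):
	 *
	 *	vfs_sync     (SYNC_BDFLUSH set)          -> nowait = 1
	 *	syscall sync (SYNC_DELWRI, no SYNC_WAIT) -> nowait = 1
	 *	umountroot   (SYNC_WAIT set, no BDFLUSH) -> nowait = 0
	 *
	 * i.e. we only block when a caller explicitly asked to wait.
	 */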
 again:
	xfs_qm_mplist_lock(mp);
	/*
	 * dqpurge_all() also takes the mplist lock and iterate thru all dquots
	 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
	 * when we have the mplist lock, we know that dquots will be consistent
	 * as long as we have it locked.
	 */
	if (! XFS_IS_QUOTA_ON(mp)) {
		xfs_qm_mplist_unlock(mp);
		return 0;
	}
	FOREACH_DQUOT_IN_MP(dqp, mp) {
		/*
		 * If this is vfs_sync calling, then skip the dquots that
		 * don't 'seem' to be dirty. ie. don't acquire dqlock.
		 * This is very similar to what xfs_sync does with inodes.
		 */
		if (flags & SYNC_BDFLUSH) {
			if (! XFS_DQ_IS_DIRTY(dqp))
				continue;
		}

		if (nowait) {
			/*
			 * Try to acquire the dquot lock. We are NOT out of
			 * lock order, but we just don't want to wait for this
			 * lock, unless somebody wanted us to.
			 */
			if (! xfs_qm_dqlock_nowait(dqp))
				continue;
		} else {
			xfs_dqlock(dqp);
		}

		/*
		 * Now, find out for sure if this dquot is dirty or not.
		 */
		if (! XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}

		/* XXX a sentinel would be better */
		recl = XFS_QI_MPLRECLAIMS(mp);
		if (! xfs_qm_dqflock_nowait(dqp)) {
			if (nowait) {
				xfs_dqunlock(dqp);
				continue;
			}
			/*
			 * If we can't grab the flush lock then if the caller
			 * really wanted us to give this our best shot, so
			 * see if we can give a push to the buffer before we wait
			 * on the flush lock. At this point, we know that
			 * even though the dquot is being flushed,
			 * it has (new) dirty data.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write
		 */
		flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
		xfs_qm_mplist_unlock(mp);
		xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
		error = xfs_qm_dqflush(dqp, flush_flags);
		xfs_dqunlock(dqp);
		if (error && XFS_FORCED_SHUTDOWN(mp))
			return 0;	/* Need to prevent umount failure */
		else if (error)
			return error;

		xfs_qm_mplist_lock(mp);
		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
				break;

			xfs_qm_mplist_unlock(mp);
			goto again;
		}
	}

	xfs_qm_mplist_unlock(mp);
	return 0;
}
/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Tell XQM that we exist as soon as possible.
	 */
	if ((error = xfs_qm_hold_quotafs_ref(mp))) {
		return error;
	}

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf, sizeof(xfs_quotainfo_t));
		mp->m_quotainfo = NULL;
		return error;
	}

	spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin");
	xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
	qinf->qi_dqreclaims = 0;

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
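	/*
	 * Worked example of the two lines above, assuming 4096-byte
	 * filesystem blocks, a one-block dquot cluster, and a 136-byte
	 * on-disk xfs_dqblk_t (sizes here are illustrative):
	 *
	 *	qi_dqchunklen = XFS_FSB_TO_BB(mp, 1) = 4096 / 512 = 8
	 *	qi_dqperchunk = BBTOB(8) / 136 = 4096 / 136 = 30
	 *
	 * so each dquot cluster would hold 30 dquots, with do_div()
	 * discarding the remainder.
	 */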
	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 */
	error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
			     XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			     (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
				XFS_DQ_PROJ),
			     XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
			     &dqp);
	if (! error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		/*
		 * We sent the XFS_QMOPT_DQSUSER flag to dqget because
		 * we don't want this dquot cached. We haven't done a
		 * quotacheck yet, and quotacheck doesn't like incore dquots.
		 */
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	return 0;
}
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);
	ASSERT(xfs_Gqm != NULL);

	/*
	 * Release the reference that XQM kept, so that we know
	 * when the XQM structure should be freed. We cannot assume
	 * that xfs_Gqm is non-null after this point.
	 */
	xfs_qm_rele_quotafs_ref(mp);

	spinlock_destroy(&qi->qi_pinlock);
	xfs_qm_list_destroy(&qi->qi_dqlist);

	if (qi->qi_uquotaip) {
		XFS_PURGE_INODE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		XFS_PURGE_INODE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi, sizeof(xfs_quotainfo_t));
	mp->m_quotainfo = NULL;
}
/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */

/* ARGSUSED */
STATIC void
xfs_qm_list_init(
	xfs_dqlist_t	*list,
	char		*str,
	int		n)
{
	mutex_init(&list->qh_lock);
	list->qh_next = NULL;
	list->qh_version = 0;
	list->qh_nelems = 0;
}

STATIC void
xfs_qm_list_destroy(
	xfs_dqlist_t	*list)
{
	mutex_destroy(&(list->qh_lock));
}
/*
 * Stripped down version of dqattach. This doesn't attach, or even look at the
 * dquots attached to the inode. The rationale is that there won't be any
 * attached at the time this is called from quotacheck.
 */
STATIC int
xfs_qm_dqget_noattach(
	xfs_inode_t	*ip,
	xfs_dquot_t	**O_udqpp,
	xfs_dquot_t	**O_gdqpp)
{
	int		error;
	xfs_mount_t	*mp;
	xfs_dquot_t	*udqp, *gdqp;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	mp = ip->i_mount;
	udqp = NULL;
	gdqp = NULL;

	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		/*
		 * We want the dquot allocated if it doesn't exist.
		 */
		if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
					  XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
					  &udqp))) {
			/*
			 * Shouldn't be able to turn off quotas here.
			 */
			ASSERT(error != ESRCH);
			ASSERT(error != ENOENT);
			return error;
		}
		ASSERT(udqp);
	}

	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		if (udqp)
			xfs_dqunlock(udqp);
		error = XFS_IS_GQUOTA_ON(mp) ?
				xfs_qm_dqget(mp, ip,
					     ip->i_d.di_gid, XFS_DQ_GROUP,
					     XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
					     &gdqp) :
				xfs_qm_dqget(mp, ip,
					     ip->i_d.di_projid, XFS_DQ_PROJ,
					     XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
					     &gdqp);
		if (error) {
			if (udqp)
				xfs_qm_dqrele(udqp);
			ASSERT(error != ESRCH);
			ASSERT(error != ENOENT);
			return error;
		}
		ASSERT(gdqp);

		/* Reacquire the locks in the right order */
		if (udqp) {
			if (! xfs_qm_dqlock_nowait(udqp)) {
				xfs_dqunlock(gdqp);
				xfs_dqlock(udqp);
				xfs_dqlock(gdqp);
			}
		}
	}

	*O_udqpp = udqp;
	*O_gdqpp = gdqp;

	if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
	if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
	return 0;
}
/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	unsigned long	s;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				       XFS_QM_QINOCREATE_SPACE_RES(mp),
				       XFS_CREATE_LOG_RES(mp), 0,
				       XFS_TRANS_PERM_LOG_RES,
				       XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
				    &xfs_zerocr, 0, 1, ip, &committed))) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

	/*
	 * Keep an extra reference to this quota inode. This inode is
	 * locked exclusively and joined to the transaction already.
	 */
	ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
	VN_HOLD(XFS_ITOV((*ip)));

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	s = XFS_SB_LOCK(mp);
	if (flags & XFS_QMOPT_SBVERSION) {
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		unsigned oldv = mp->m_sb.sb_versionnum;
#endif
		ASSERT(!XFS_SB_VERSION_HASQUOTA(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				    XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		XFS_SB_VERSION_ADDQUOTA(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		cmn_err(CE_NOTE,
			"Old superblock version %x, converting to %x.",
			oldv, mp->m_sb.sb_versionnum);
#endif
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	XFS_SB_UNLOCK(mp, s);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
		return error;
	}
	return 0;
}
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_disk_dquot_t	*ddq;
	int			j;

	xfs_buftrace("RESET DQUOTS", bp);
	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(XFS_QM_DQPERBLK(mp) == j);
#endif
	ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
	for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;
		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
	}
}
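/*
 * Note the stepping idiom at the bottom of the loop above: on disk each
 * dquot record is padded out to a full xfs_dqblk_t, so the cursor is
 * advanced by casting to xfs_dqblk_t, adding one, and casting back:
 *
 *	ddq = (xfs_disk_dquot_t *)((xfs_dqblk_t *)ddq + 1);
 *
 * Incrementing a plain xfs_disk_dquot_t pointer would land mid-record,
 * since sizeof(xfs_disk_dquot_t) is smaller than sizeof(xfs_dqblk_t).
 */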
STATIC int
xfs_qm_dqiter_bufs(
	xfs_mount_t	*mp,
	xfs_dqid_t	firstid,
	xfs_fsblock_t	bno,
	xfs_filblks_t	blkcnt,
	uint		flags)
{
	xfs_buf_t	*bp;
	int		error;
	int		incr;
	int		type;

	ASSERT(blkcnt > 0);
	incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
		XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
					   XFS_FSB_TO_DADDR(mp, bno),
					   (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
		if (error)
			break;

		(void) xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_bdwrite(mp, bp);
		/*
		 * goto the next block.
		 */
		bno++;
		firstid += XFS_QM_DQPERBLK(mp);
	}
	return error;
}
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	xfs_mount_t	*mp,
	xfs_inode_t	*qip,
	uint		flags)
{
	xfs_bmbt_irec_t		*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi(NULL, qip, lblkno,
				  maxlblkcnt - lblkno,
				  XFS_BMAPI_METADATA,
				  NULL,
				  0, map, &nmaps, NULL, NULL);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				XFS_QM_DQPERBLK(mp);
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_baread(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						(int)XFS_QI_DQCHUNKLEN(mp));
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			if ((error = xfs_qm_dqiter_bufs(mp,
							firstid,
							map[i].br_startblock,
							map[i].br_blockcount,
							flags)))
				break;
		}

		if (error)
			break;
	} while (nmaps > 0);

	kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));

	return error;
}
/*
 * Called by dqusage_adjust in doing a quotacheck.
 * Given the inode, and a dquot (either USR or GRP, doesn't matter),
 * this updates its incore copy as well as the buffer copy. This is
 * so that once the quotacheck is done, we can just log all the buffers,
 * as opposed to logging numerous updates to individual dquots.
 */
STATIC void
xfs_qm_quotacheck_dqadjust(
	xfs_dquot_t		*dqp,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 */
	if (! XFS_IS_SUSER_DQUOT(dqp)) {
		xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
		xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
}
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	void		*private_data,	/* not used */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* not used */
	void		*dip,		/* on-disk inode pointer (not used) */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_dquot_t	*udqp, *gdqp;
	xfs_qcnt_t	nblks, rtblks;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	if (ip->i_d.di_mode == 0) {
		xfs_iput_new(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(ENOENT);
	}

	/*
	 * Obtain the locked dquots. In case of an error (eg. allocation
	 * fails for ENOSPC), we return the negative of the error number
	 * to bulkstat, so that it can get propagated to quotacheck() and
	 * making us disable quotas for the file system.
	 */
	if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
		xfs_iput(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_GIVEUP;
		return error;
	}

	rtblks = 0;
	if (! XFS_IS_REALTIME_INODE(ip)) {
		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
	} else {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
			xfs_iput(ip, XFS_ILOCK_EXCL);
			if (udqp)
				xfs_qm_dqput(udqp);
			if (gdqp)
				xfs_qm_dqput(gdqp);
			*res = BULKSTAT_RV_GIVEUP;
			return error;
		}
		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
	}
	ASSERT(ip->i_delayed_blks == 0);

	/*
	 * We can't release the inode while holding its dquot locks.
	 * The inode can go into inactive and might try to acquire the dquotlocks.
	 * So, just unlock here and do a vn_rele at the end.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(udqp);
		xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
		xfs_qm_dqput(udqp);
	}
	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(gdqp);
		xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
		xfs_qm_dqput(gdqp);
	}
	/*
	 * Now release the inode. This will send it to 'inactive', and
	 * possibly even free blocks.
	 */
	VN_RELE(XFS_ITOV(ip));

	*res = BULKSTAT_RV_DIDONE;
	return 0;
}
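/*
 * A minimal sketch of the bulkstat callback contract the function above
 * follows (the skeleton and names below are illustrative, not a second
 * real callback in this file):
 *
 *	STATIC int
 *	example_adjust(xfs_mount_t *mp, xfs_ino_t ino, void __user *buf,
 *		       int ubsize, void *priv, xfs_daddr_t bno,
 *		       int *ubused, void *dip, int *res)
 *	{
 *		if (want_to_skip(ino)) {
 *			*res = BULKSTAT_RV_NOTHING;	skip, keep walking
 *			return XFS_ERROR(EINVAL);
 *		}
 *		if (fatal_failure()) {
 *			*res = BULKSTAT_RV_GIVEUP;	abort the walk
 *			return error;
 *		}
 *		*res = BULKSTAT_RV_DIDONE;		accounted this inode
 *		return 0;
 *	}
 */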
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * There should be no cached dquots. The (simplistic) quotacheck
	 * algorithm doesn't like that.
	 */
	ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);

	cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if ((uip = XFS_QI_UQIP(mp))) {
		if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if ((gip = XFS_QI_GQIP(mp))) {
		if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		if ((error = xfs_bulkstat(mp, &lastino, &count,
				xfs_qm_dqusage_adjust, NULL,
				structsz, NULL, BULKSTAT_FG_IGET, &done)))
			break;
	} while (! done);

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
		goto error_return;
	}
	/*
	 * We've made all the changes that we need to make incore.
	 * Now flush_them down to disk buffers.
	 */
	xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);

	/*
	 * We didn't log anything, because if we crashed, we'll have to
	 * start the quotacheck from scratch anyway. However, we must make
	 * sure that our dquot changes are secure before we put the
	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
	 * flush.
	 */
	XFS_bflush(mp->m_ddev_targp);

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
	mp->m_qflags |= flags;

	XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");

 error_return:
	if (error) {
		cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
			"Disabling quotas.",
			mp->m_fsname, error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		ASSERT(xfs_Gqm != NULL);
		xfs_qm_destroy_quotainfo(mp);
		(void)xfs_mount_reset_sbqflags(mp);
	} else {
		cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
	}
	return error;
}
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					      0, 0, &uip, 0)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					      0, 0, &gip, 0))) {
				if (uip)
					VN_RELE(XFS_ITOV(uip));
				return XFS_ERROR(error);
			}
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					       sbflags | XFS_SB_UQUOTINO,
					       flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				VN_RELE(XFS_ITOV(uip));

			return XFS_ERROR(error);
		}
	}

	XFS_QI_UQIP(mp) = uip;
	XFS_QI_GQIP(mp) = gip;

	return 0;
}
/*
 * Traverse the freelist of dquots and attempt to reclaim a maximum of
 * 'howmany' dquots. This operation races with dqlookup(), and attempts to
 * favor the lookup function ...
 * XXXsup merge this with qm_reclaim_one().
 */
STATIC int
xfs_qm_shake_freelist(
	int		howmany)
{
	int		nreclaimed;
	xfs_dqhash_t	*hash;
	xfs_dquot_t	*dqp, *nextdqp;
	int		restarts;

	if (howmany <= 0)
		return 0;

	nreclaimed = 0;
	restarts = 0;

#ifdef QUOTADEBUG
	cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
#endif
	/* lock order is : hashchainlock, freelistlock, mplistlock */
 tryagain:
	xfs_qm_freelist_lock(xfs_Gqm);

	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
	     ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
	      nreclaimed < howmany); ) {
		xfs_dqlock(dqp);

		/*
		 * We are racing with dqlookup here. Naturally we don't
		 * want to reclaim a dquot that lookup wants.
		 */
		if (dqp->dq_flags & XFS_DQ_WANT) {
			xfs_dqunlock(dqp);
			xfs_qm_freelist_unlock(xfs_Gqm);
			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
				return nreclaimed;
			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
			goto tryagain;
		}

		/*
		 * If the dquot is inactive, we are assured that it is
		 * not on the mplist or the hashlist, and that makes our
		 * life easier.
		 */
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(dqp->HL_PREVP == NULL);
			ASSERT(dqp->MPL_PREVP == NULL);
			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
			nextdqp = dqp->dq_flnext;
			goto off_freelist;
		}

		ASSERT(dqp->MPL_PREVP);
		/*
		 * Try to grab the flush lock. If this dquot is in the process of
		 * getting flushed to disk, we don't want to reclaim it.
		 */
		if (! xfs_qm_dqflock_nowait(dqp)) {
			xfs_dqunlock(dqp);
			dqp = dqp->dq_flnext;
			continue;
		}

		/*
		 * We have the flush lock so we know that this is not in the
		 * process of being flushed. So, if this is dirty, flush it
		 * DELWRI so that we don't get a freelist infested with
		 * dirty dquots.
		 */
		if (XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
			/*
			 * We flush it delayed write, so don't bother
			 * releasing the mplock.
			 */
			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
			dqp = dqp->dq_flnext;
			continue;
		}
		/*
		 * We're trying to get the hashlock out of order. This races
		 * with dqlookup; so, we giveup and goto the next dquot if
		 * we couldn't get the hashlock. This way, we won't starve
		 * a dqlookup process that holds the hashlock that is
		 * waiting for the freelist lock.
		 */
		if (! xfs_qm_dqhashlock_nowait(dqp)) {
			xfs_dqfunlock(dqp);
			xfs_dqunlock(dqp);
			dqp = dqp->dq_flnext;
			continue;
		}
		/*
		 * This races with dquot allocation code as well as dqflush_all
		 * and reclaim code. So, if we failed to grab the mplist lock,
		 * giveup everything and start over.
		 */
		hash = dqp->q_hash;
		ASSERT(hash);
		if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
			/* XXX put a sentinel so that we can come back here */
			xfs_dqfunlock(dqp);
			xfs_dqunlock(dqp);
			XFS_DQ_HASH_UNLOCK(hash);
			xfs_qm_freelist_unlock(xfs_Gqm);
			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
				return nreclaimed;
			goto tryagain;
		}
		xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
#ifdef QUOTADEBUG
		cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
			dqp, be32_to_cpu(dqp->q_core.d_id));
#endif
		ASSERT(dqp->q_nrefs == 0);
		nextdqp = dqp->dq_flnext;
		XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
		XQM_HASHLIST_REMOVE(hash, dqp);
		xfs_dqfunlock(dqp);
		xfs_qm_mplist_unlock(dqp->q_mount);
		XFS_DQ_HASH_UNLOCK(hash);

 off_freelist:
		XQM_FREELIST_REMOVE(dqp);
		xfs_dqunlock(dqp);
		nreclaimed++;
		XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
		xfs_qm_dqdestroy(dqp);
		dqp = nextdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);
	return nreclaimed;
}
/*
 * The kmem_shake interface is invoked when memory is running low.
 */
/* ARGSUSED */
STATIC int
xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
{
	int	ndqused, nfree, n;

	if (!kmem_shake_allow(gfp_mask))
		return 0;
	if (!xfs_Gqm)
		return 0;

	nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
	/* incore dquots in all f/s's */
	ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;

	ASSERT(ndqused >= 0);

	if (nfree <= ndqused && nfree < ndquot)
		return 0;

	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
	n = nfree - ndqused - ndquot;		/* # over target */

	return xfs_qm_shake_freelist(MAX(nfree, n));
}
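/*
 * Worked example of the target arithmetic above, assuming
 * qm_dqfree_ratio == 2 and ndquot == 1024 (illustrative values only):
 * with 3000 dquots total and 2100 of them free, ndqused = 900, the
 * desired free count is 900 * 2 = 1800, and n = 2100 - 1800 - 1024 is
 * the surplus over target. The early return above already filters out
 * the case where the freelist is small (nfree <= ndqused and
 * nfree < ndquot), so the shake only proceeds when free dquots
 * dominate the cache.
 */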
/*
 * Just pop the least recently used dquot off the freelist and
 * recycle it. The returned dquot is locked.
 */
STATIC xfs_dquot_t *
xfs_qm_dqreclaim_one(void)
{
	xfs_dquot_t	*dqpout;
	xfs_dquot_t	*dqp;
	int		restarts;

	restarts = 0;
	dqpout = NULL;

	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
 startagain:
	xfs_qm_freelist_lock(xfs_Gqm);

	FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
		xfs_dqlock(dqp);

		/*
		 * We are racing with dqlookup here. Naturally we don't
		 * want to reclaim a dquot that lookup wants. We release the
		 * freelist lock and start over, so that lookup will grab
		 * both the dquot and the freelistlock.
		 */
		if (dqp->dq_flags & XFS_DQ_WANT) {
			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
			xfs_dqunlock(dqp);
			xfs_qm_freelist_unlock(xfs_Gqm);
			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
				return NULL;
			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
			goto startagain;
		}

		/*
		 * If the dquot is inactive, we are assured that it is
		 * not on the mplist or the hashlist, and that makes our
		 * life easier.
		 */
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(dqp->HL_PREVP == NULL);
			ASSERT(dqp->MPL_PREVP == NULL);
			XQM_FREELIST_REMOVE(dqp);
			xfs_dqunlock(dqp);
			dqpout = dqp;
			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
			break;
		}

		ASSERT(dqp->q_hash);
		ASSERT(dqp->MPL_PREVP);

		/*
		 * Try to grab the flush lock. If this dquot is in the process of
		 * getting flushed to disk, we don't want to reclaim it.
		 */
		if (! xfs_qm_dqflock_nowait(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}

		/*
		 * We have the flush lock so we know that this is not in the
		 * process of being flushed. So, if this is dirty, flush it
		 * DELWRI so that we don't get a freelist infested with
		 * dirty dquots.
		 */
		if (XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
			/*
			 * We flush it delayed write, so don't bother
			 * releasing the freelist lock.
			 */
			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
			continue;
		}

		if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
			xfs_dqfunlock(dqp);
			xfs_dqunlock(dqp);
			continue;
		}

		if (! xfs_qm_dqhashlock_nowait(dqp))
			goto mplistunlock;

		ASSERT(dqp->q_nrefs == 0);
		xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
		XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
		XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
		XQM_FREELIST_REMOVE(dqp);
		dqpout = dqp;
		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
 mplistunlock:
		xfs_qm_mplist_unlock(dqp->q_mount);
		xfs_dqfunlock(dqp);
		xfs_dqunlock(dqp);
		if (dqpout)
			break;
	}

	xfs_qm_freelist_unlock(xfs_Gqm);
	return dqpout;
}
/*------------------------------------------------------------------*/

/*
 * Return a new incore dquot. Depending on the number of
 * dquots in the system, we either allocate a new one on the kernel heap,
 * or reclaim a free one.
 * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
 * to reclaim an existing one from the freelist.
 */
boolean_t
xfs_qm_dqalloc_incore(
	xfs_dquot_t **O_dqpp)
{
	xfs_dquot_t	*dqp;

	/*
	 * Check against high water mark to see if we want to pop
	 * a nincompoop dquot off the freelist.
	 */
	if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
		/*
		 * Try to recycle a dquot from the freelist.
		 */
		if ((dqp = xfs_qm_dqreclaim_one())) {
			XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
			/*
			 * Just zero the core here. The rest will get
			 * reinitialized by caller. XXX we shouldn't even
			 * do this zero ...
			 */
			memset(&dqp->q_core, 0, sizeof(dqp->q_core));
			*O_dqpp = dqp;
			return B_FALSE;
		}
		XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
	}

	/*
	 * Allocate a brand new dquot on the kernel heap and return it
	 * to the caller to initialize.
	 */
	ASSERT(xfs_Gqm->qm_dqzone != NULL);
	*O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
	atomic_inc(&xfs_Gqm->qm_totaldquots);

	return B_TRUE;
}
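/*
 * Typical caller shape (a sketch; the real caller is the dquot
 * allocation path in xfs_qm_dqget):
 *
 *	if (xfs_qm_dqalloc_incore(&dqp) == B_FALSE) {
 *		recycled dquot: q_core is zeroed, but locks and list
 *		linkage must be reinitialized by the caller
 *	} else {
 *		fresh zone allocation: fully zeroed memory
 *	}
 */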
/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

#ifdef QUOTADEBUG
	cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
#endif
	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	if ((error = xfs_trans_reserve(tp, 0,
				       mp->m_sb.sb_sectsize + 128, 0,
				       0,
				       XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	(void) xfs_trans_commit(tp, 0);

	return 0;
}
/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid and gid (from cred_t) make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in  : inode (unlocked)
 * out : udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	uid_t		uid,
	gid_t		gid,
	prid_t		prid,
	uint		flags,
	xfs_dquot_t	**O_udqpp,
	xfs_dquot_t	**O_gdqpp)
{
	int		error;
	xfs_dquot_t	*uq, *gq;
	uint		lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
					     XFS_QMOPT_ILOCKED))) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						  XFS_DQ_USER,
						  XFS_QMOPT_DQALLOC |
						  XFS_QMOPT_DOWARN,
						  &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = ip->i_udquot;
			xfs_dqlock(uq);
			XFS_DQHOLD(uq);
			xfs_dqunlock(uq);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						  XFS_DQ_GROUP,
						  XFS_QMOPT_DQALLOC |
						  XFS_QMOPT_DOWARN,
						  &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (ip->i_d.di_projid != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						  XFS_DQ_PROJ,
						  XFS_QMOPT_DQALLOC |
						  XFS_QMOPT_DOWARN,
						  &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	}
	if (uq)
		xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode
	 * is going to keep this dquot pointer even
	 * after the trans_commit.
	 */
	xfs_dqlock(newdq);
	XFS_DQHOLD(newdq);
	xfs_dqunlock(newdq);
	*IO_olddq = newdq;

	return prevdq;
}
/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp,
	uint		flags)
{
	int		error;
	xfs_mount_t	*mp;
	uint		delblks, blkflags, prjflags = 0;
	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;

	ASSERT(XFS_ISLOCKED_INODE(ip));
	mp = ip->i_mount;
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
		delblksudq = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			unresudq = ip->i_udquot;
		}
	}
	if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
		if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
		    ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))
			prjflags = XFS_QMOPT_ENOSPC;

		if (prjflags ||
		    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
		     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
			delblksgdq = gdqp;
			if (delblks) {
				ASSERT(ip->i_gdquot);
				unresgdq = ip->i_gdquot;
			}
		}
	}

	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags)))
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(delblksudq || delblksgdq);
		ASSERT(unresudq || unresgdq);
		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
				flags | blkflags | prjflags)))
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
				blkflags);
	}

	return 0;
}
int
xfs_qm_vop_rename_dqattach(
	xfs_inode_t	**i_tab)
{
	xfs_inode_t	*ip;
	int		i;
	int		error;

	ip = i_tab[0];

	if (! XFS_IS_QUOTA_ON(ip->i_mount))
		return 0;

	if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;
	}
	for (i = 1; (i < 4 && i_tab[i]); i++) {
		/*
		 * Watch out for duplicate entries in the table.
		 */
		if ((ip = i_tab[i]) != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
void
xfs_qm_vop_dqattach_and_dqmod_newinode(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp)
{
	if (!XFS_IS_QUOTA_ON(tp->t_mountp))
		return;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));

	if (udqp) {
		xfs_dqlock(udqp);
		XFS_DQHOLD(udqp);
		xfs_dqunlock(udqp);
		ASSERT(ip->i_udquot == NULL);
		ip->i_udquot = udqp;
		ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		xfs_dqlock(gdqp);
		XFS_DQHOLD(gdqp);
		xfs_dqunlock(gdqp);
		ASSERT(ip->i_gdquot == NULL);
		ip->i_gdquot = gdqp;
		ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
		ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
			ip->i_d.di_gid : ip->i_d.di_projid) ==
			be32_to_cpu(gdqp->q_core.d_id));
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
/* ------------- list stuff -----------------*/

STATIC void
xfs_qm_freelist_init(xfs_frlist_t *ql)
{
	ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
	mutex_init(&ql->qh_lock);
	ql->qh_version = 0;
	ql->qh_nelems = 0;
}

STATIC void
xfs_qm_freelist_destroy(xfs_frlist_t *ql)
{
	xfs_dquot_t	*dqp, *nextdqp;

	mutex_lock(&ql->qh_lock);
	for (dqp = ql->qh_next;
	     dqp != (xfs_dquot_t *)ql; ) {
		xfs_dqlock(dqp);
		nextdqp = dqp->dq_flnext;
#ifdef QUOTADEBUG
		cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
#endif
		XQM_FREELIST_REMOVE(dqp);
		xfs_dqunlock(dqp);
		xfs_qm_dqdestroy(dqp);
		dqp = nextdqp;
	}
	mutex_unlock(&ql->qh_lock);
	mutex_destroy(&ql->qh_lock);

	ASSERT(ql->qh_nelems == 0);
}
STATIC void
xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
{
	dq->dq_flnext = ql->qh_next;
	dq->dq_flprev = (xfs_dquot_t *)ql;
	ql->qh_next = dq;
	dq->dq_flnext->dq_flprev = dq;
	xfs_Gqm->qm_dqfreelist.qh_nelems++;
	xfs_Gqm->qm_dqfreelist.qh_version++;
}

void
xfs_qm_freelist_unlink(xfs_dquot_t *dq)
{
	xfs_dquot_t *next = dq->dq_flnext;
	xfs_dquot_t *prev = dq->dq_flprev;

	next->dq_flprev = prev;
	prev->dq_flnext = next;
	dq->dq_flnext = dq->dq_flprev = dq;
	xfs_Gqm->qm_dqfreelist.qh_nelems--;
	xfs_Gqm->qm_dqfreelist.qh_version++;
}

void
xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
{
	xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
}
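/*
 * The freelist is a circular doubly-linked list whose head is cast to
 * xfs_dquot_t * and used as a sentinel, which is why walkers elsewhere
 * in this file terminate on dqp != (xfs_dquot_t *)ql rather than on a
 * NULL check. A sketch of the invariants maintained above:
 *
 *	empty list:  ql->qh_next == ql->qh_prev == (xfs_dquot_t *)ql
 *	append:      insert in front of the sentinel, i.e. at qh_prev
 *	unlink:      next->dq_flprev = prev; prev->dq_flnext = next;
 *	             and the node is re-pointed at itself.
 */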
STATIC int
xfs_qm_dqhashlock_nowait(
	xfs_dquot_t	*dqp)
{
	int		locked;

	locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
	return locked;
}

int
xfs_qm_freelist_lock_nowait(
	xfs_qm_t	*xqm)
{
	int		locked;

	locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
	return locked;
}

STATIC int
xfs_qm_mplist_nowait(
	xfs_mount_t	*mp)
{
	int		locked;

	ASSERT(mp->m_quotainfo);
	locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
	return locked;
}