/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_format.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32
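/*
 * Sketch of the callback contract assumed by xfs_qm_dquot_walk() below:
 * execute() is invoked for every dquot found in the radix tree, with
 * qi_tree_lock held.  It returns 0 on success, EAGAIN to have the dquot
 * retried on a later pass, or another errno; EFSCORRUPTED aborts the walk.
 * xfs_qm_dqpurge(), xfs_qm_dqpurge_hints() and xfs_qm_flush_one() are the
 * callbacks used in this file.
 */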
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;
		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}
/*
 * Release the group or project dquot pointers the user dquots may be
 * carrying around as a hint, and proceed to purge the user dquot cache
 * if requested.
 */
STATIC int
xfs_qm_dqpurge_hints(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			flags = *((uint *)data);

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/* If this quota has a hint attached, prepare for releasing it now */
	gdqp = dqp->q_gdquot;
	if (gdqp)
		dqp->q_gdquot = NULL;

	pdqp = dqp->q_pdquot;
	if (pdqp)
		dqp->q_pdquot = NULL;

	xfs_dqunlock(dqp);

	if (gdqp)
		xfs_qm_dqrele(gdqp);
	if (pdqp)
		xfs_qm_dqrele(pdqp);

	if (flags & XFS_QMOPT_UQUOTA)
		return xfs_qm_dqpurge(dqp, NULL);

	return 0;
}
/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	/*
	 * We have to release group/project dquot hint(s) from the user dquot
	 * at first if they are there, otherwise we would run into an infinite
	 * loop while walking through radix tree to purge other type of dquots
	 * since their refcount is not zero if the user dquot refers to them
	 * as hint.
	 *
	 * Calling the special xfs_qm_dqpurge_hints() will end up going through
	 * the general xfs_qm_dqpurge() against the user dquot cache if
	 * requested.
	 */
	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);

	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}
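/*
 * Note: the unmount path below calls xfs_qm_dqpurge_all() with
 * XFS_QMOPT_QUOTALL, which purges user, group and project dquots in one go.
 */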
/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}
/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		if (type == XFS_DQ_GROUP)
			dqp = udqhint->q_gdquot;
		else
			dqp = udqhint->q_pdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
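/*
 * Note on xfs_qm_dqattach_one() above: on success it leaves a referenced
 * but unlocked dquot in *IO_idqpp, so the caller never has to deal with a
 * locked dquot.
 */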
/*
 * Given a udquot and group/project type, attach the group/project
 * dquot pointer to the udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_hint(
	struct xfs_inode	*ip,
	int			type)
{
	struct xfs_dquot **dqhintp;
	struct xfs_dquot *dqp;
	struct xfs_dquot *udq = ip->i_udquot;

	ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);

	xfs_dqlock(udq);

	if (type == XFS_DQ_GROUP) {
		dqp = ip->i_gdquot;
		dqhintp = &udq->q_gdquot;
	} else {
		dqp = ip->i_pdquot;
		dqhintp = &udq->q_pdquot;
	}

	if (*dqhintp) {
		struct xfs_dquot *tmp;

		if (dqp == *dqhintp)
			goto done;

		tmp = *dqhintp;
		*dqhintp = NULL;
		xfs_qm_dqrele(tmp);
	}

	*dqhintp = xfs_qm_dqhold(dqp);
done:
	xfs_dqunlock(udq);
}
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}
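/*
 * The checks in xfs_qm_need_dqattach() above make dquot attachment a cheap
 * no-op for the common cases: quotas not running or not on, dquots already
 * attached, or the inode being one of the quota inodes themselves.
 */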
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_pdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group/project quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas > 1 && ip->i_udquot) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
		ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));

		/*
		 * We do not have i_udquot locked at this point, but this check
		 * is OK since we don't depend on the i_gdquot to be accurate
		 * 100% all the time. It is just a hint, and this will
		 * succeed in general.
		 */
		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
			xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);

		if (ip->i_udquot->q_pdquot != ip->i_pdquot)
			xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_GQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
		if (XFS_IS_PQUOTA_ON(mp))
			ASSERT(ip->i_pdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}
int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called during inode
 * deletion.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}
int
xfs_qm_calc_dquots_per_chunk(
	struct xfs_mount	*mp,
	unsigned int		nbblks)	/* basic block units */
{
	unsigned int	ndquots;

	ASSERT(nbblks > 0);
	ndquots = BBTOB(nbblks);
	do_div(ndquots, sizeof(xfs_dqblk_t));

	return ndquots;
}
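/*
 * nbblks above is in 512-byte basic blocks; BBTOB() converts it to bytes
 * before dividing by the size of an on-disk dquot record (xfs_dqblk_t) to
 * get the number of dquots per chunk.
 */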
struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime, so remove it
	 * from the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_del_init(&dqp->q_lru);
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	return LRU_RETRY;
}
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;
	unsigned long		nr_to_scan = sc->nr_to_scan;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
					&nr_to_scan);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}
static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_count_node(&qi->qi_lru, sc->nid);
}
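/*
 * xfs_qm_shrink_count() and xfs_qm_shrink_scan() above form the
 * count_objects/scan_objects pair wired into qi_shrinker by
 * xfs_qm_init_quotainfo() below, giving the VM a NUMA-aware way to reclaim
 * unused dquots from the per-node LRU lists.
 */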
/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	if ((error = list_lru_init(&qinf->qi_lru))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		list_lru_destroy(&qinf->qi_lru);
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
							qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}
/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;
	bool		need_alloc = true;

	*ip = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
								&committed);
		if (error) {
			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
					 XFS_TRANS_ABORT);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
				(XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
				 XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_flags = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       &xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}
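/*
 * xfs_qm_get_rtblks() above counts only extents already recorded in the data
 * fork; the caller asserts i_delayed_blks == 0, so no delayed allocations
 * are missed during quotacheck.
 */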
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore.  Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}
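/*
 * Note on the error path above: a failed quotacheck both tears down the
 * quotainfo structure and resets the on-disk quota flags, so the mount
 * proceeds with quotas disabled rather than with stale accounting.
 */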
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	__int64_t		sbflags = 0;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return XFS_ERROR(error);
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
			    XFS_SB_QFLAGS);
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  sbflags | XFS_SB_PQUOTINO,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return XFS_ERROR(error);
}
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}
/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}
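/*
 * A minimal usage sketch for xfs_qm_write_sb_changes() above, as seen in
 * xfs_qm_mount_quotas(): only the changed superblock fields are passed,
 * e.g.
 *
 *	if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS))
 *		xfs_alert(mp, "%s: Superblock update failed!", __func__);
 */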
/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else if (pq)
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	if (gq)
		xfs_qm_dqrele(gq);
	if (uq)
		xfs_qm_dqrele(uq);
	return error;
}
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
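/*
 * Note on xfs_qm_vop_chown() above: the block and inode counts simply move
 * between the old and new dquots inside the caller's transaction; the quota
 * reservation itself was taken earlier (see xfs_qm_vop_chown_reserve()
 * below), and the old dquot is handed back to the caller.
 */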
/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(XFS_IS_GQUOTA_ON(mp));
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(XFS_IS_PQUOTA_ON(mp));
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}