/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
#include "xfs_dinode.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);

STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32
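
/*
 * Callback contract for xfs_qm_dquot_walk(), as used by the callers in this
 * file (xfs_qm_dqpurge, xfs_qm_dqpurge_hints and xfs_qm_flush_one).  A
 * callback has the shape (illustrative signature only):
 *
 *	int my_callback(struct xfs_dquot *dqp, void *data);
 *
 * Returning EAGAIN marks the dquot as skipped and causes the whole walk to
 * be restarted after a short delay; returning EFSCORRUPTED aborts the walk;
 * any other non-zero value is remembered and returned once the walk is done.
 */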

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Release the group or project dquot pointers the user dquots may be
 * carrying around as a hint, and proceed to purge the user dquot cache if
 * requested.
 */
STATIC int
xfs_qm_dqpurge_hints(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			flags = *((uint *)data);

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/* If this quota has a hint attached, prepare for releasing it now */
	gdqp = dqp->q_gdquot;
	if (gdqp)
		dqp->q_gdquot = NULL;

	pdqp = dqp->q_pdquot;
	if (pdqp)
		dqp->q_pdquot = NULL;

	xfs_dqunlock(dqp);

	if (gdqp)
		xfs_qm_dqrele(gdqp);
	if (pdqp)
		xfs_qm_dqrele(pdqp);

	if (flags & XFS_QMOPT_UQUOTA)
		return xfs_qm_dqpurge(dqp, NULL);

	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	/*
	 * We have to release group/project dquot hint(s) from the user dquot
	 * at first if they are there, otherwise we would run into an infinite
	 * loop while walking through radix tree to purge other type of dquots
	 * since their refcount is not zero if the user dquot refers to them
	 * as a hint.
	 *
	 * Calling the special xfs_qm_dqpurge_hints() will end up going
	 * through the general xfs_qm_dqpurge() against the user dquot cache
	 * if requested.
	 */
	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);

	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}
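
/*
 * In this file xfs_qm_dqpurge_all() is driven from two places: the unmount
 * path in xfs_qm_unmount() below, and the failure path of quotacheck.  Both
 * pass XFS_QMOPT_QUOTALL so that every cached dquot of every type is dropped.
 */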

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		if (type == XFS_DQ_GROUP)
			dqp = udqhint->q_gdquot;
		else
			dqp = udqhint->q_pdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

/*
 * Given a udquot and group/project type, attach the group/project
 * dquot pointer to the udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_hint(
	struct xfs_inode	*ip,
	int			type)
{
	struct xfs_dquot **dqhintp;
	struct xfs_dquot *dqp;
	struct xfs_dquot *udq = ip->i_udquot;

	ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);

	xfs_dqlock(udq);

	if (type == XFS_DQ_GROUP) {
		dqp = ip->i_gdquot;
		dqhintp = &udq->q_gdquot;
	} else {
		dqp = ip->i_pdquot;
		dqhintp = &udq->q_pdquot;
	}

	if (*dqhintp) {
		struct xfs_dquot *tmp;

		if (*dqhintp == dqp)
			goto done;

		tmp = *dqhintp;
		*dqhintp = NULL;
		xfs_qm_dqrele(tmp);
	}

	*dqhintp = xfs_qm_dqhold(dqp);
done:
	xfs_dqunlock(udq);
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}
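
/*
 * As the checks above show, dquot attachment is skipped entirely when quotas
 * are not running or not on, when the inode already has all the dquots it
 * needs attached, and for the quota inodes themselves, which are never
 * accounted against any quota.
 */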

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_pdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group/project quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas > 1 && ip->i_udquot) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
		ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));

		/*
		 * We do not have i_udquot locked at this point, but this check
		 * is OK since we don't depend on the i_gdquot to be accurate
		 * 100% all the time. It is just a hint, and this will
		 * succeed in general.
		 */
		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
			xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);

		if (ip->i_udquot->q_pdquot != ip->i_pdquot)
			xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_GQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
		if (XFS_IS_PQUOTA_ON(mp))
			ASSERT(ip->i_pdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
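
/*
 * A minimal usage sketch (not a verbatim caller): code elsewhere in XFS that
 * is about to charge blocks or inodes to an inode's quotas typically does
 *
 *	error = xfs_qm_dqattach(ip, 0);
 *	if (error)
 *		return error;
 *
 * before reserving quota, so that i_udquot/i_gdquot/i_pdquot are populated.
 */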

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * ddestroy.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_del_init(&dqp->q_lru);
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;
	unsigned long		nr_to_scan = sc->nr_to_scan;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate,
				   &isol, &nr_to_scan);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_count_node(&qi->qi_lru, sc->nid);
}
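
/*
 * These two hooks are wired up as qi_shrinker.count_objects and
 * qi_shrinker.scan_objects in xfs_qm_init_quotainfo() below, so memory
 * pressure drives dquot reclaim through xfs_qm_dquot_isolate() above.
 */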

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	if ((error = list_lru_init(&qinf->qi_lru))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		list_lru_destroy(&qinf->qi_lru);
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(mp,
							qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can no longer write.
		 * If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}
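
/*
 * Note on the precalculated constants above: qi_dqchunklen is the size of a
 * dquot cluster in 512-byte basic blocks, and xfs_calc_dquots_per_chunk()
 * simply divides that chunk size in bytes by sizeof(struct xfs_dqblk) to get
 * qi_dqperchunk, the number of on-disk dquot records per cluster buffer (the
 * same arithmetic the ASSERT in xfs_qm_reset_dqcounts() double-checks).
 */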

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	*ip = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
		}
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	if (!*ip) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
								&committed);
		if (error) {
			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
					     XFS_TRANS_ABORT);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
			XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}

STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
		 */
		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
			    "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;
	uint			lock_mode;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       NULL);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
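
/*
 * The loop above keeps re-mapping from lblkno until xfs_bmapi_read() returns
 * no more extents, and it kicks off readahead for the following extent while
 * the current one is being reset, so quotacheck streams the dquot clusters
 * from disk instead of reading them one buffer at a time.
 */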

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	__int64_t		sbflags = 0;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return XFS_ERROR(error);
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
			    XFS_SB_QFLAGS);
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  sbflags | XFS_SB_PQUOTINO,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return XFS_ERROR(error);
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}
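
/*
 * This is the usual pattern for a small standalone superblock update:
 * allocate a transaction, reserve log space from the precomputed
 * tr_qm_sbchange reservation, log the superblock fields named by "flags"
 * with xfs_mod_sb(), and commit.  On a reservation failure the transaction
 * is simply cancelled and the error returned to the caller.
 */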

/* --------------- utility functions for vnodeops ---------------- */

/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else if (pq)
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	if (gq)
		xfs_qm_dqrele(gq);
	if (uq)
		xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
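
/*
 * As the comment above notes, the quota deltas applied here were reserved
 * ahead of time; in this file that reservation is what
 * xfs_qm_vop_chown_reserve() below performs before an ownership change is
 * committed.
 */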

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}