// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC void	xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32
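
/*
 * Editor's note: the example below is illustrative only and is not part of
 * the original file.  It shows the contract expected of the @execute
 * callback handed to xfs_qm_dquot_walk(): return 0 on success, -EAGAIN to
 * have the dquot revisited on a later pass, or another negative errno to be
 * recorded (-EFSCORRUPTED aborts the walk).  The function name and the
 * counter passed via @data are hypothetical.
 */
static int __maybe_unused
example_count_dquots(
	struct xfs_dquot	*dqp,
	void			*data)
{
	unsigned long		*counter = data;	/* caller-supplied state */

	(*counter)++;		/* trivially "process" the dquot */
	return 0;		/* 0 == success, keep walking */
}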
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}
/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}
/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}
/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	bool		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}
int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this's called by
 * ddelete.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}
struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	return LRU_RETRY;
}
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) !=
	    (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}
static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}
STATIC void
xfs_qm_set_defquota(
	xfs_mount_t	*mp,
	uint		type,
	xfs_quotainfo_t	*qinf)
{
	xfs_dquot_t		*dqp;
	struct xfs_def_quota	*defq;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	ddqp = &dqp->q_core;
	defq = xfs_get_defquota(dqp, qinf);

	/*
	 * Timers and warnings have been already set, let's just set the
	 * default limits for this quota type
	 */
	defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
	xfs_qm_dqdestroy(dqp);
}
/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_disk_dquot	*ddqp;
	struct xfs_dquot	*dqp;
	uint			type;
	int			error;

	qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
	qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
	qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
	qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
	qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
	qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
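
	/*
	 * Editor's note (illustrative, not from the original file): the
	 * XFS_QM_*TIMELIMIT defaults are expressed in seconds and the
	 * *WARNLIMIT defaults are plain counts; a one-week grace period,
	 * for example, works out to 7 * 24 * 60 * 60 = 604800 seconds.
	 * The exact default values live in the quota headers, not here.
	 */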
	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 *
	 * Timers and warnings are globally set by the first timer found in
	 * user/group/proj quota types, otherwise a default value is used.
	 * This should be split into different fields per quota type.
	 */
	if (XFS_IS_UQUOTA_RUNNING(mp))
		type = XFS_DQ_USER;
	else if (XFS_IS_GQUOTA_RUNNING(mp))
		type = XFS_DQ_GROUP;
	else
		type = XFS_DQ_PROJ;
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	ddqp = &dqp->q_core;

	/*
	 * The warnings and timers set the grace period given to
	 * a user or group before he or she can not perform any
	 * more writing. If it is zero, a default is used.
	 */
	if (ddqp->d_btimer)
		qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
	if (ddqp->d_itimer)
		qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
	if (ddqp->d_rtbtimer)
		qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
	if (ddqp->d_bwarns)
		qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
	if (ddqp->d_iwarns)
		qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
	if (ddqp->d_rtbwarns)
		qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);

	xfs_qm_dqdestroy(dqp);
}
/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
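
	/*
	 * Editor's note (illustrative, not from the original file): with a
	 * 4096-byte filesystem block and a one-block dquot cluster,
	 * qi_dqchunklen would be XFS_FSB_TO_BB(mp, 1) = 8 basic (512-byte)
	 * blocks, and qi_dqperchunk would be 4096 / sizeof(struct xfs_dqblk)
	 * on-disk dquots per chunk.  The exact figures depend on the
	 * filesystem geometry.
	 */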
	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, qinf);

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}
/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	bool		need_alloc = true;

	*ip = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;
	xfs_failaddr_t		fa;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(xfs_dqblk_t);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);

	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
		if (fa)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_flags = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * the dquot counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
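/*
 * Editor's note (illustrative, not from the original file): for an inode
 * whose di_nblocks is 100 with 40 of those blocks on the realtime device,
 * the adjustment below adds icount += 1, bcount += 60 and rtbcount += 40
 * to each dquot the inode's uid/gid/projid maps to.
 */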
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks;
	xfs_filblks_t	rtblks = 0;	/* total rt blks */
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return -EINVAL;
	}

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
			if (error)
				goto error0;
		}

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	xfs_irele(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_irele(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0);
		if (!bp) {
			error = -EINVAL;
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_relse(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}
/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}
STATIC void
xfs_qm_destroy_quotainos(
	xfs_quotainfo_t	*qi)
{
	if (qi->qi_uquotaip) {
		xfs_irele(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		xfs_irele(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		xfs_irele(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
}
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}
/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
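/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  It shows the usual calling sequence: take dquot
 * references with xfs_qm_vop_dqalloc() before starting the transaction,
 * attach them to the freshly created inode with xfs_qm_vop_create_dqattach()
 * once the transaction is running, then drop the caller's references.  The
 * helper name, the flag combination and the elided transaction setup are
 * assumptions, so the sketch is kept out of the build.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_create_with_quota(
	struct xfs_inode	*dp,	/* parent directory */
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid)
{
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL, *pdqp = NULL;
	struct xfs_trans	*tp = NULL;
	struct xfs_inode	*ip = NULL;
	int			error;

	/* take references on the dquots the new inode will be charged to */
	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				   &udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/* ... allocate @tp, reserve quota, create and join @ip here ... */

	/* attach the dquot references to the new inode and log the change */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	/* ... commit @tp ... */

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);
	return 0;
}
#endif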
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, gid, XFS_DQ_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
					true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}