/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */
#ifdef DEBUG
xfs_buftarg_t	*xfs_dqerror_target;
int		xfs_do_dqerror;
int		xfs_dqreq_num;
int		xfs_dqerror_mod = 33;
#endif

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;
/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_zone_free(xfs_qm_dqzone, dqp);
}
/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(d->d_id);
	defq = xfs_get_defquota(dq, q);

	if (defq->bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
		prealloc = 1;
	}
	if (defq->bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
		prealloc = 1;
	}
	if (defq->isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
	if (defq->ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}
/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}
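/*
 * Illustrative caller sketch (assumed, not part of this file): the
 * transaction commit path applies the accumulated deltas to q_core and
 * then re-evaluates limits and timers, roughly like so:
 *
 *	struct xfs_disk_dquot *d = &dqp->q_core;
 *
 *	be64_add_cpu(&d->d_bcount, (int64_t)blk_delta);
 *	if (d->d_id) {
 *		xfs_qm_adjust_dqlimits(mp, dqp);
 *		xfs_qm_adjust_dqtimers(mp, d);
 *	}
 *
 * The d_id check mirrors the rule above: the root dquot (id 0) carries
 * the defaults and is never subjected to limits or timers itself.
 */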
/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	xfs_dqid_t	curid;
	int		i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
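/*
 * Worked example (illustrative numbers, not from this file): if a chunk
 * holds qi_dqperchunk == 30 dquots, initializing the block for id == 47
 * yields curid = 47 - (47 % 30) = 30, so the chunk is stamped with ids
 * 30..59. Every chunk therefore starts on a qi_dqperchunk boundary,
 * which is what lets xfs_qm_dqtobp() locate a dquot with simple
 * division and modulo arithmetic on its id.
 */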
/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;
	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
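/*
 * Worked example (illustrative numbers): for d_blk_hardlimit == 1000
 * blocks and no soft limit, q_prealloc_hi_wmark = 1000 and
 * q_prealloc_lo_wmark = 1000 / 100 * 95 = 950. space = 1000 / 100 = 10,
 * so q_low_space[] ends up as {10, 30, 50}: the 1%, 3% and 5% marks the
 * preallocation code uses to throttle speculative allocation as the
 * quota approaches its hard limit.
 */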
/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	struct xfs_defer_ops dfops;
	xfs_bmbt_irec_t map;
	int		nmaps, error;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_defer_init(&dfops, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quotas is turned off while we didn't
	 * have an inode lock
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &dfops);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);
	if (!bp) {
		error = -ENOMEM;
		goto error1;
	}
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_defer_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	error = xfs_defer_finish(tpp, &dfops, NULL);
	if (error)
		goto error1;

	/* Transaction was committed? */
	if (*tpp != tp) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_defer_cancel(&dfops);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return error;
}
STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);

	if (error) {
		ASSERT(*bpp == NULL);
		return error;
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_dqcheck(mp, ddq, firstid + i,
				    dqp->dq_flags & XFS_DQ_ALLTYPES,
				    XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return -EIO;
		}
	}

	return 0;
}
/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip;
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
	uint			lock_mode;

	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quotas is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return -ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return error;
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return 0;
}
/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if it needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);
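
	/*
	 * For reference, the flush "lock" built on this completion reduces
	 * to roughly the following (a sketch of the xfs_dquot.h helpers,
	 * shown here for illustration only):
	 *
	 *	xfs_dqflock(dqp)	-> wait_for_completion(&dqp->q_flush)
	 *	xfs_dqflock_nowait(dqp)	-> try_wait_for_completion(&dqp->q_flush)
	 *	xfs_dqfunlock(dqp)	-> complete(&dqp->q_flush)
	 *
	 * Completing once here makes the first xfs_dqflock() succeed
	 * immediately, i.e. the lock starts out in the unlocked state.
	 */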
	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(mp, xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
				XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
		if (error)
			goto error0;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
	 * So we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}
/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to first id in next allocated chunk
 * using the SEEK_DATA interface.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	uint			type,
	xfs_dqid_t		*id,
	loff_t			eof)
{
	struct xfs_inode	*quotip;
	xfs_fsblock_t		start;
	loff_t			offset;
	uint			lock;
	xfs_dqid_t		next_id = *id + 1;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	quotip = xfs_quota_inode(mp, type);
	lock = xfs_ilock_data_map_shared(quotip);

	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
				      eof, SEEK_DATA);
	if (offset < 0)
		error = offset;

	xfs_iunlock(quotip, lock);

	/* -ENXIO is essentially "no more data" */
	if (error)
		return (error == -ENXIO ? -ENOENT : error);

	/* Convert next data offset back to a quota id */
	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
	return 0;
}
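/*
 * Worked example (illustrative numbers): with qi_dqperchunk == 30 and
 * *id == 17, next_id == 18 is still inside the current chunk and is
 * returned directly. With *id == 29, next_id == 30 lands on a chunk
 * boundary, so we seek for data from the offset of file block 1; if the
 * next allocated chunk starts at block 5, *id becomes 5 * 30 == 150.
 */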
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	loff_t			eof = 0;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return -ESRCH;
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return -EIO;
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

	/* Get the end of the quota file if we need it */
	if (flags & XFS_QMOPT_DQNEXT) {
		struct xfs_inode	*quotip;
		xfs_fileoff_t		last;
		uint			lock_mode;

		quotip = xfs_quota_inode(mp, type);
		lock_mode = xfs_ilock_data_map_shared(quotip);
		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
		xfs_iunlock(quotip, lock_mode);
		if (error)
			return error;
		eof = XFS_FSB_TO_B(mp, last);
	}

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		/* uninit / unused quota found in radix tree, keep looking */
		if (flags & XFS_QMOPT_DQNEXT) {
			if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
				xfs_dqunlock(dqp);
				mutex_unlock(&qi->qi_tree_lock);
				error = xfs_dq_get_next_id(mp, type, &id, eof);
				if (error)
					return error;
				goto restart;
			}
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(mp, xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(mp, xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	/* If we are asked to find next active id, keep looking */
	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
		error = xfs_dq_get_next_id(mp, type, &id, eof);
		if (!error)
			goto restart;
	}

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return -ESRCH;
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	/* If we are asked to find next active id, keep looking */
	if (flags & XFS_QMOPT_DQNEXT) {
		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			xfs_qm_dqput(dqp);
			error = xfs_dq_get_next_id(mp, type, &id, eof);
			if (error)
				return error;
			goto restart;
		}
	}

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
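/*
 * Illustrative caller sketch (assumed, not part of this file): attaching
 * a user dquot to an inode follows the pattern
 *
 *	struct xfs_dquot	*udqp;
 *	int			error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_qm_dqget(mp, ip, ip_uid, XFS_DQ_USER,
 *			     XFS_QMOPT_DQALLOC, &udqp);
 *	if (!error) {
 *		... use the locked, referenced dquot ...
 *		xfs_qm_dqput(udqp);
 *	}
 *
 * where ip_uid is a hypothetical stand-in for the inode's owner id. The
 * dquot comes back locked with one reference held; the caller pairs it
 * with xfs_qm_dqput() when done.
 */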
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}
/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}
/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes..
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    ((lip->li_lsn == qip->qli_flush_lsn) ||
	     (lip->li_flags & XFS_LI_FAILED))) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn) {
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		} else {
			/*
			 * Clear the failed state since we are about to drop
			 * the flush lock
			 */
			if (lip->li_flags & XFS_LI_FAILED)
				xfs_clear_li_failed(lip);
			spin_unlock(&ailp->xa_lock);
		}
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);

		error = -EIO;
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot..
	 */
	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			    XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EIO;
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return -EIO;
}
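/*
 * Illustrative caller sketch (assumed, not part of this file): flushing
 * a dirty dquot pairs the flush lock with a delayed write queue, roughly:
 *
 *	struct xfs_buf	*bp = NULL;
 *
 *	xfs_dqlock(dqp);
 *	xfs_dqflock(dqp);
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 *	xfs_dqunlock(dqp);
 *
 * The flush lock itself is only released by xfs_qm_dqflush_done() once
 * the buffer has made it to disk.
 */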
/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lowest id first.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
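/*
 * Illustrative usage (assumed, not part of this file): moving usage
 * between two dquots, e.g. on a chown, locks both in one call:
 *
 *	xfs_dqlock2(olddq, newdq);
 *	... transfer counts and reservations ...
 *	xfs_dqunlock(olddq);
 *	xfs_dqunlock(newdq);
 *
 * Ordering the two q_qlock mutexes by id means any two tasks locking
 * the same pair always acquire them in the same order, which is what
 * rules out an ABBA deadlock.
 */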
int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void __exit
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}