/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */
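/*
 * Illustrative nesting (a sketch, not code from this file): a lookup
 * path that honours the ordering above takes the locks outermost-first,
 * much as xfs_qm_dqget() below does:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	mutex_lock(&qi->qi_tree_lock);
 *	xfs_dqlock(dqp);
 *	...
 */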
#ifdef DEBUG
xfs_buftarg_t	*xfs_dqerror_target;
int		xfs_do_dqerror;
int		xfs_dqreq_num;
int		xfs_dqerror_mod = 33;
#endif
struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;
/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}
/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	int			prealloc = 0;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
		prealloc = 1;
	}
	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
		prealloc = 1;
	}
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}
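/*
 * Worked example (illustrative): with an administrator-set default block
 * soft limit of, say, 100 blocks in q->qi_bsoftlimit, a dquot whose own
 * d_blk_softlimit is still zero picks up the default above; a dquot that
 * already carries a non-zero per-user limit is left untouched.
 */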
/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t	*mp,
	xfs_disk_dquot_t *d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}
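/*
 * Worked example (illustrative, assuming a default qi_btimelimit of
 * seven days): the first time d_bcount climbs above d_blk_softlimit,
 * d_btimer is armed to "get_seconds() + 7 days"; the enforcement code
 * elsewhere treats the soft limit like a hard limit once that time
 * passes. If usage falls back to the soft limit or below first, the
 * timer is cleared to 0 and a fresh grace period starts on the next
 * breach.
 */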
/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
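/*
 * Worked example (illustrative, assuming qi_dqperchunk == 30, i.e. one
 * 4k block of 136-byte xfs_dqblk entries): for id 73 the computation
 * above gives curid = 73 - (73 % 30) = 60, so the chunk is stamped with
 * ids 60..89 and id 73 lands at index 13 within it.
 */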
/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	__uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
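/*
 * Worked example (illustrative): with a hard limit of 1000 blocks and
 * no soft limit, q_prealloc_hi_wmark = 1000 and q_prealloc_lo_wmark =
 * 1000 / 100 * 95 = 950; space = 1000 / 100 = 10, so q_low_space holds
 * 10, 30 and 50 blocks for the 1%, 3% and 5% thresholds respectively.
 */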
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_qm_calc_dquots_per_chunk(mp,
					XFS_BB_TO_FSB(mp, bp->b_length));

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF))
			return false;
		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
			return false;
	}

	return true;
}
STATIC bool
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;
		int			error;

		ddq = &d[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
				       "xfs_dquot_buf_verify");
		if (error)
			return false;
	}

	return true;
}
static void
xfs_dquot_buf_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
}
/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (!xfs_dquot_buf_verify(mp, bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		return;
	}
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};
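/*
 * Note (illustrative): these verifiers hook into the buffer I/O layer.
 * Read paths opt in by passing &xfs_dquot_buf_ops to
 * xfs_trans_read_buf(), as xfs_qm_dqtobp() does below, while freshly
 * allocated chunks attach it by hand:
 *
 *	bp->b_ops = &xfs_dquot_buf_ops;
 *
 * (see xfs_qm_dqalloc() and xfs_qm_dqrepair() below).
 */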
/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t flist;
	xfs_bmbt_irec_t map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quotas is turned off while we didn't
	 * have an inode lock
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);

	error = xfs_buf_geterror(bp);
	if (error)
		goto error1;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return (error);
}
STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);

	if (error) {
		ASSERT(*bpp == NULL);
		return XFS_ERROR(error);
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_qm_dqcheck(mp, ddq, firstid + i,
				       dqp->dq_flags & XFS_DQ_ALLTYPES,
				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return XFS_ERROR(EIO);
		}
	}

	return 0;
}
/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	xfs_ilock(quotip, XFS_ILOCK_SHARED);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quotas is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		return ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return XFS_ERROR(error);
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return (0);
}
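/*
 * Worked example (illustrative, assuming qi_dqperchunk == 30): for
 * id 12345, q_fileoffset = 12345 / 30 = 411 chunks (filesystem blocks)
 * into the quota inode, and q_bufoffset = (12345 % 30) *
 * sizeof(xfs_dqblk_t) = 15 * 136 = 2040 bytes into that chunk's buffer.
 */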
/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_attrsetm,
					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
	 * So we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}
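/*
 * The counting completion initialised in xfs_qm_dqread() is what
 * implements the dquot flush "lock": completing q_flush once leaves a
 * single unit available, so the first xfs_dqflock() (a
 * wait_for_completion() wrapper in xfs_dquot.h) succeeds immediately
 * while later callers block until xfs_dqfunlock() (a complete())
 * returns the unit. A sketch:
 *
 *	xfs_dqflock(dqp);	consume the unit; other flushers wait
 *	(flush the dquot to its buffer)
 *	xfs_dqfunlock(dqp);	hand the unit back
 */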
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = -radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}
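/*
 * Illustrative call sequence (a sketch, not code from this file): a
 * caller holding ip's ILOCK_EXCL looks up the user dquot, drops the
 * dquot lock for normal use, and releases the reference when done:
 *
 *	struct xfs_dquot	*udqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
 *			     XFS_QMOPT_DQALLOC, &udqp);
 *	if (error)
 *		return error;
 *	xfs_dqunlock(udqp);	(dqget returns the dquot locked)
 *	...
 *	xfs_qm_dqrele(udqp);
 */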
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;

	trace_xfs_dqput_free(dqp);

	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
		XFS_STATS_INC(xs_qm_dquot_unused);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
	 * keep the gdquot/pdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	pdqp = dqp->q_pdquot;
	if (pdqp) {
		xfs_dqlock(pdqp);
		dqp->q_pdquot = NULL;
	}

	xfs_dqunlock(dqp);

	/*
	 * If we had a group/project quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
	if (pdqp)
		xfs_qm_dqput(pdqp);
}
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs > 0)
		xfs_dqunlock(dqp);
	else
		xfs_qm_dqput_final(dqp);
}
/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}
/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes..
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		spin_lock(&mp->m_ail->xa_lock);
		if (lip->li_flags & XFS_LI_IN_AIL)
			xfs_trans_ail_delete(mp->m_ail, lip,
					     SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&mp->m_ail->xa_lock);
		error = XFS_ERROR(EIO);
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot..
	 */
	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			       XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return XFS_ERROR(EIO);
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return XFS_ERROR(EIO);
}
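/*
 * Illustrative flushing sequence (a sketch of how callers such as
 * xfs_qm_flush_one() in xfs_qm.c drive this function; buffer_list is a
 * caller-owned delwri list):
 *
 *	struct xfs_buf	*bp;
 *
 *	xfs_dqlock(dqp);
 *	xfs_dqflock(dqp);
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 *	xfs_dqunlock(dqp);
 */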
/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
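/*
 * Example (illustrative): locking a pair of dquots without worrying
 * about their relative ids; unlock order does not matter:
 *
 *	xfs_dqlock2(udqp, gdqp);
 *	(adjust both dquots)
 *	xfs_dqunlock(udqp);
 *	xfs_dqunlock(gdqp);
 */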
int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}