fs/xfs/xfs_dquot.c
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qh->qh_lock
 *     qi->qi_dqlist_lock
 *       dquot->q_qlock (xfs_dqlock() and friends)
 *         dquot->q_flush (xfs_dqflock() and friends)
 *           xfs_Gqm->qm_dqfrlist_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

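/*
 * Debug-only error injection knobs: when xfs_do_dqerror is set,
 * xfs_qm_dqget() fails every xfs_dqerror_mod-th request against
 * xfs_dqerror_target with EIO (see the DEBUG block in xfs_qm_dqget).
 */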
#ifdef DEBUG
xfs_buftarg_t *xfs_dqerror_target;
int xfs_do_dqerror;
int xfs_dqreq_num;
int xfs_dqerror_mod = 33;
#endif

static struct lock_class_key xfs_dquot_other_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
        xfs_dquot_t     *dqp)
{
        ASSERT(list_empty(&dqp->q_freelist));

        mutex_destroy(&dqp->q_qlock);
        kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);

        atomic_dec(&xfs_Gqm->qm_totaldquots);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
        xfs_mount_t             *mp,
        xfs_disk_dquot_t        *d)
{
        xfs_quotainfo_t         *q = mp->m_quotainfo;

        ASSERT(d->d_id);

        if (q->qi_bsoftlimit && !d->d_blk_softlimit)
                d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
        if (q->qi_bhardlimit && !d->d_blk_hardlimit)
                d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
        if (q->qi_isoftlimit && !d->d_ino_softlimit)
                d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
        if (q->qi_ihardlimit && !d->d_ino_hardlimit)
                d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
        if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
                d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
        if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
                d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
        xfs_mount_t             *mp,
        xfs_disk_dquot_t        *d)
{
        ASSERT(d->d_id);

#ifdef DEBUG
        if (d->d_blk_hardlimit)
                ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
                       be64_to_cpu(d->d_blk_hardlimit));
        if (d->d_ino_hardlimit)
                ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
                       be64_to_cpu(d->d_ino_hardlimit));
        if (d->d_rtb_hardlimit)
                ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
                       be64_to_cpu(d->d_rtb_hardlimit));
#endif

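        /*
         * The three blocks below share one pattern: if a timer is not yet
         * running, start it when either limit is exceeded and clear the
         * warning count otherwise; if it is running, stop it once the usage
         * has dropped back within both limits.
         */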
        if (!d->d_btimer) {
                if ((d->d_blk_softlimit &&
                     (be64_to_cpu(d->d_bcount) >
                      be64_to_cpu(d->d_blk_softlimit))) ||
                    (d->d_blk_hardlimit &&
                     (be64_to_cpu(d->d_bcount) >
                      be64_to_cpu(d->d_blk_hardlimit)))) {
                        d->d_btimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_btimelimit);
                } else {
                        d->d_bwarns = 0;
                }
        } else {
                if ((!d->d_blk_softlimit ||
                     (be64_to_cpu(d->d_bcount) <=
                      be64_to_cpu(d->d_blk_softlimit))) &&
                    (!d->d_blk_hardlimit ||
                     (be64_to_cpu(d->d_bcount) <=
                      be64_to_cpu(d->d_blk_hardlimit)))) {
                        d->d_btimer = 0;
                }
        }

        if (!d->d_itimer) {
                if ((d->d_ino_softlimit &&
                     (be64_to_cpu(d->d_icount) >
                      be64_to_cpu(d->d_ino_softlimit))) ||
                    (d->d_ino_hardlimit &&
                     (be64_to_cpu(d->d_icount) >
                      be64_to_cpu(d->d_ino_hardlimit)))) {
                        d->d_itimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_itimelimit);
                } else {
                        d->d_iwarns = 0;
                }
        } else {
                if ((!d->d_ino_softlimit ||
                     (be64_to_cpu(d->d_icount) <=
                      be64_to_cpu(d->d_ino_softlimit))) &&
                    (!d->d_ino_hardlimit ||
                     (be64_to_cpu(d->d_icount) <=
                      be64_to_cpu(d->d_ino_hardlimit)))) {
                        d->d_itimer = 0;
                }
        }

        if (!d->d_rtbtimer) {
                if ((d->d_rtb_softlimit &&
                     (be64_to_cpu(d->d_rtbcount) >
                      be64_to_cpu(d->d_rtb_softlimit))) ||
                    (d->d_rtb_hardlimit &&
                     (be64_to_cpu(d->d_rtbcount) >
                      be64_to_cpu(d->d_rtb_hardlimit)))) {
                        d->d_rtbtimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_rtbtimelimit);
                } else {
                        d->d_rtbwarns = 0;
                }
        } else {
                if ((!d->d_rtb_softlimit ||
                     (be64_to_cpu(d->d_rtbcount) <=
                      be64_to_cpu(d->d_rtb_softlimit))) &&
                    (!d->d_rtb_hardlimit ||
                     (be64_to_cpu(d->d_rtbcount) <=
                      be64_to_cpu(d->d_rtb_hardlimit)))) {
                        d->d_rtbtimer = 0;
                }
        }
}

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
        xfs_trans_t     *tp,
        xfs_mount_t     *mp,
        xfs_dqid_t      id,
        uint            type,
        xfs_buf_t       *bp)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        xfs_dqblk_t     *d;
        int             curid, i;

        ASSERT(tp);
        ASSERT(xfs_buf_islocked(bp));

        d = bp->b_addr;

        /*
         * ID of the first dquot in the block - id's are zero based.
         */
        curid = id - (id % q->qi_dqperchunk);
        ASSERT(curid >= 0);
        memset(d, 0, BBTOB(q->qi_dqchunklen));
        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
                d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
                d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
                d->dd_diskdq.d_id = cpu_to_be32(curid);
                d->dd_diskdq.d_flags = type;
        }

        xfs_trans_dquot_buf(tp, bp,
                            (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
                            ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
                             XFS_BLF_GDQUOT_BUF)));
        xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
        xfs_trans_t     **tpp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *dqp,
        xfs_inode_t     *quotip,
        xfs_fileoff_t   offset_fsb,
        xfs_buf_t       **O_bpp)
{
        xfs_fsblock_t   firstblock;
        xfs_bmap_free_t flist;
        xfs_bmbt_irec_t map;
        int             nmaps, error, committed;
        xfs_buf_t       *bp;
        xfs_trans_t     *tp = *tpp;

        ASSERT(tp != NULL);

        trace_xfs_dqalloc(dqp);

        /*
         * Initialize the bmap freelist prior to calling bmapi code.
         */
        xfs_bmap_init(&flist, &firstblock);
        xfs_ilock(quotip, XFS_ILOCK_EXCL);
        /*
         * Return if this type of quotas is turned off while we didn't
         * have an inode lock
         */
        if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
                xfs_iunlock(quotip, XFS_ILOCK_EXCL);
                return (ESRCH);
        }

        xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
        nmaps = 1;
        error = xfs_bmapi_write(tp, quotip, offset_fsb,
                                XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
                                &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
                                &map, &nmaps, &flist);
        if (error)
                goto error0;
        ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
        ASSERT(nmaps == 1);
        ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
               (map.br_startblock != HOLESTARTBLOCK));

        /*
         * Keep track of the blkno to save a lookup later
         */
        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

        /* now we can just get the buffer (there's nothing to read yet) */
        bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
                               dqp->q_blkno,
                               mp->m_quotainfo->qi_dqchunklen,
                               0);

        error = xfs_buf_geterror(bp);
        if (error)
                goto error1;

        /*
         * Make a chunk of dquots out of this buffer and log
         * the entire thing.
         */
        xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
                              dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

        /*
         * xfs_bmap_finish() may commit the current transaction and
         * start a second transaction if the freelist is not empty.
         *
         * Since we still want to modify this buffer, we need to
         * ensure that the buffer is not released on commit of
         * the first transaction and ensure the buffer is added to the
         * second transaction.
         *
         * If there is only one transaction then don't stop the buffer
         * from being released when it commits later on.
         */

        xfs_trans_bhold(tp, bp);

        if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
                goto error1;
        }

        if (committed) {
                tp = *tpp;
                xfs_trans_bjoin(tp, bp);
        } else {
                xfs_trans_bhold_release(tp, bp);
        }

        *O_bpp = bp;
        return 0;

error1:
        xfs_bmap_cancel(&flist);
error0:
        xfs_iunlock(quotip, XFS_ILOCK_EXCL);

        return (error);
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
        xfs_trans_t             **tpp,
        xfs_dquot_t             *dqp,
        xfs_disk_dquot_t        **O_ddpp,
        xfs_buf_t               **O_bpp,
        uint                    flags)
{
        xfs_bmbt_irec_t map;
        int             nmaps = 1, error;
        xfs_buf_t       *bp;
        xfs_inode_t     *quotip = XFS_DQ_TO_QIP(dqp);
        xfs_mount_t     *mp = dqp->q_mount;
        xfs_disk_dquot_t *ddq;
        xfs_dqid_t      id = be32_to_cpu(dqp->q_core.d_id);
        xfs_trans_t     *tp = (tpp ? *tpp : NULL);

        dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

        xfs_ilock(quotip, XFS_ILOCK_SHARED);
        if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
                /*
                 * Return if this type of quotas is turned off while we
                 * didn't have the quota inode lock.
                 */
                xfs_iunlock(quotip, XFS_ILOCK_SHARED);
                return ESRCH;
        }

        /*
         * Find the block map; no allocations yet
         */
        error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
                               XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

        xfs_iunlock(quotip, XFS_ILOCK_SHARED);
        if (error)
                return error;

        ASSERT(nmaps == 1);
        ASSERT(map.br_blockcount == 1);

        /*
         * Offset of dquot in the (fixed sized) dquot chunk.
         */
        dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
                sizeof(xfs_dqblk_t);

        ASSERT(map.br_startblock != DELAYSTARTBLOCK);
        if (map.br_startblock == HOLESTARTBLOCK) {
                /*
                 * We don't allocate unless we're asked to
                 */
                if (!(flags & XFS_QMOPT_DQALLOC))
                        return ENOENT;

                ASSERT(tp);
                error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
                                       dqp->q_fileoffset, &bp);
                if (error)
                        return error;
                tp = *tpp;
        } else {
                trace_xfs_dqtobp_read(dqp);

                /*
                 * store the blkno etc so that we don't have to do the
                 * mapping all the time
                 */
                dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

                error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
                                           dqp->q_blkno,
                                           mp->m_quotainfo->qi_dqchunklen,
                                           0, &bp);
                if (error || !bp)
                        return XFS_ERROR(error);
        }

        ASSERT(xfs_buf_islocked(bp));

        /*
         * calculate the location of the dquot inside the buffer.
         */
        ddq = bp->b_addr + dqp->q_bufoffset;

        /*
         * A simple sanity check in case we got a corrupted dquot...
         */
        error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
                               flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
                               "dqtobp");
        if (error) {
                if (!(flags & XFS_QMOPT_DQREPAIR)) {
                        xfs_trans_brelse(tp, bp);
                        return XFS_ERROR(EIO);
                }
        }

        *O_bpp = bp;
        *O_ddpp = ddq;

        return (0);
}

/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        uint                    flags,
        struct xfs_dquot        **O_dqpp)
{
        struct xfs_dquot        *dqp;
        struct xfs_disk_dquot   *ddqp;
        struct xfs_buf          *bp;
        struct xfs_trans        *tp = NULL;
        int                     error;
        int                     cancelflags = 0;

        dqp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);

        dqp->dq_flags = type;
        dqp->q_core.d_id = cpu_to_be32(id);
        dqp->q_mount = mp;
        INIT_LIST_HEAD(&dqp->q_freelist);
        mutex_init(&dqp->q_qlock);
        init_waitqueue_head(&dqp->q_pinwait);

        /*
         * Because we want to use a counting completion, complete
         * the flush completion once to allow a single access to
         * the flush completion without blocking.
         */
        init_completion(&dqp->q_flush);
        complete(&dqp->q_flush);

        /*
         * Make sure group quotas have a different lock class than user
         * quotas.
         */
        if (!(type & XFS_DQ_USER))
                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

        atomic_inc(&xfs_Gqm->qm_totaldquots);

        trace_xfs_dqread(dqp);

        if (flags & XFS_QMOPT_DQALLOC) {
                tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
                error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
                                XFS_WRITE_LOG_RES(mp) +
                                /*
                                 * Round the chunklen up to the next multiple
                                 * of 128 (buf log item chunk size).
                                 */
                                BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
                                0,
                                XFS_TRANS_PERM_LOG_RES,
                                XFS_WRITE_LOG_COUNT);
                if (error)
                        goto error1;
                cancelflags = XFS_TRANS_RELEASE_LOG_RES;
        }

        /*
         * get a pointer to the on-disk dquot and the buffer containing it
         * dqp already knows its own type (GROUP/USER).
         */
        error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
        if (error) {
                /*
                 * This can happen if quotas got turned off (ESRCH),
                 * or if the dquot didn't exist on disk and we ask to
                 * allocate (ENOENT).
                 */
                trace_xfs_dqread_fail(dqp);
                cancelflags |= XFS_TRANS_ABORT;
                goto error1;
        }

        /* copy everything from disk dquot to the incore dquot */
        memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
        xfs_qm_dquot_logitem_init(dqp);

        /*
         * Reservation counters are defined as reservation plus current usage
         * to avoid having to add every time.
         */
        dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
        dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
        dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

        /* Mark the buf so that this will stay incore a little longer */
        xfs_buf_set_ref(bp, XFS_DQUOT_REF);

        /*
         * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
         * So we need to release with xfs_trans_brelse().
         * The strategy here is identical to that of inodes; we lock
         * the dquot in xfs_qm_dqget() before making it accessible to
         * others. This is because dquots, like inodes, need a good level of
         * concurrency, and we don't want to take locks on the entire buffers
         * for dquot accesses.
         * Note also that the dquot buffer may even be dirty at this point, if
         * this particular dquot was repaired. We still aren't afraid to
         * brelse it because we have the changes incore.
         */
        ASSERT(xfs_buf_islocked(bp));
        xfs_trans_brelse(tp, bp);

        if (tp) {
                error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
                if (error)
                        goto error0;
        }

        *O_dqpp = dqp;
        return error;

error1:
        if (tp)
                xfs_trans_cancel(tp, cancelflags);
error0:
        xfs_qm_dqdestroy(dqp);
        *O_dqpp = NULL;
        return error;
}

/*
 * Lookup a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; and, these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by caller, and it is left locked
 * on return. Returning dquot is locked.
 */
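/*
 * Returns 0 with a locked, referenced dquot in *O_dqpp on a hit, -1 if the
 * dquot was found but is being freed (the caller should retry), and 1 if no
 * matching dquot is on the chain.
 */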
STATIC int
xfs_qm_dqlookup(
        xfs_mount_t             *mp,
        xfs_dqid_t              id,
        xfs_dqhash_t            *qh,
        xfs_dquot_t             **O_dqpp)
{
        xfs_dquot_t             *dqp;

        ASSERT(mutex_is_locked(&qh->qh_lock));

        /*
         * Traverse the hashchain looking for a match
         */
        list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
                /*
                 * We already have the hashlock. We don't need the
                 * dqlock to look at the id field of the dquot, since the
                 * id can't be modified without the hashlock anyway.
                 */
                if (be32_to_cpu(dqp->q_core.d_id) != id || dqp->q_mount != mp)
                        continue;

                trace_xfs_dqlookup_found(dqp);

                xfs_dqlock(dqp);
                if (dqp->dq_flags & XFS_DQ_FREEING) {
                        *O_dqpp = NULL;
                        xfs_dqunlock(dqp);
                        return -1;
                }

                dqp->q_nrefs++;

                /*
                 * move the dquot to the front of the hashchain
                 */
                list_move(&dqp->q_hashlist, &qh->qh_list);
                trace_xfs_dqlookup_done(dqp);
                *O_dqpp = dqp;
                return 0;
        }

        *O_dqpp = NULL;
        return 1;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
 * a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,      /* locked inode (optional) */
        xfs_dqid_t      id,       /* uid/projid/gid depending on type */
        uint            type,     /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
        uint            flags,    /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
        xfs_dquot_t     **O_dqpp) /* OUT : locked incore dquot */
{
        xfs_dquot_t     *dqp;
        xfs_dqhash_t    *h;
        uint            version;
        int             error;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
        if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
            (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
            (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
                return (ESRCH);
        }
        h = XFS_DQ_HASH(mp, id, type);

#ifdef DEBUG
        if (xfs_do_dqerror) {
                if ((xfs_dqerror_target == mp->m_ddev_targp) &&
                    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
                        xfs_debug(mp, "Returning error in dqget");
                        return (EIO);
                }
        }

        ASSERT(type == XFS_DQ_USER ||
               type == XFS_DQ_PROJ ||
               type == XFS_DQ_GROUP);
        if (ip) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                if (type == XFS_DQ_USER)
                        ASSERT(ip->i_udquot == NULL);
                else
                        ASSERT(ip->i_gdquot == NULL);
        }
#endif

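        /*
         * We come back to "restart" if the lookup finds a dquot that is
         * being freed, or if another thread inserts a duplicate while the
         * hash chain lock is dropped for the disk read below.
         */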
restart:
        mutex_lock(&h->qh_lock);

        /*
         * Look in the cache (hashtable).
         * The chain is kept locked during lookup.
         */
        switch (xfs_qm_dqlookup(mp, id, h, O_dqpp)) {
        case -1:
                XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
                mutex_unlock(&h->qh_lock);
                delay(1);
                goto restart;
        case 0:
                XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
                /*
                 * The dquot was found, moved to the front of the chain,
                 * taken off the freelist if it was on it, and locked
                 * at this point. Just unlock the hashchain and return.
                 */
                ASSERT(*O_dqpp);
                ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
                mutex_unlock(&h->qh_lock);
                trace_xfs_dqget_hit(*O_dqpp);
                return 0;       /* success */
        default:
                XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
                break;
        }

        /*
         * Dquot cache miss. We don't want to keep the inode lock across
         * a (potential) disk read. Also we don't want to deal with the lock
         * ordering between quotainode and this inode. OTOH, dropping the inode
         * lock here means dealing with a chown that can happen before
         * we re-acquire the lock.
         */
        if (ip)
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        /*
         * Save the hashchain version stamp, and unlock the chain, so that
         * we don't keep the lock across a disk read
         */
        version = h->qh_version;
        mutex_unlock(&h->qh_lock);

        error = xfs_qm_dqread(mp, id, type, flags, &dqp);

        if (ip)
                xfs_ilock(ip, XFS_ILOCK_EXCL);

        if (error)
                return error;

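        /*
         * Both the inode lock and the hash chain lock were dropped around
         * the disk read above, so the inode and the hash chain have to be
         * revalidated before the freshly read dquot can be inserted.
         */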
        /*
         * Dquot lock comes after hashlock in the lock ordering
         */
        if (ip) {
                /*
                 * A dquot could be attached to this inode by now, since
                 * we had dropped the ilock.
                 */
                if (type == XFS_DQ_USER) {
                        if (!XFS_IS_UQUOTA_ON(mp)) {
                                /* inode stays locked on return */
                                xfs_qm_dqdestroy(dqp);
                                return XFS_ERROR(ESRCH);
                        }
                        if (ip->i_udquot) {
                                xfs_qm_dqdestroy(dqp);
                                dqp = ip->i_udquot;
                                xfs_dqlock(dqp);
                                goto dqret;
                        }
                } else {
                        if (!XFS_IS_OQUOTA_ON(mp)) {
                                /* inode stays locked on return */
                                xfs_qm_dqdestroy(dqp);
                                return XFS_ERROR(ESRCH);
                        }
                        if (ip->i_gdquot) {
                                xfs_qm_dqdestroy(dqp);
                                dqp = ip->i_gdquot;
                                xfs_dqlock(dqp);
                                goto dqret;
                        }
                }
        }

        /*
         * Hashlock comes after ilock in lock order
         */
        mutex_lock(&h->qh_lock);
        if (version != h->qh_version) {
                xfs_dquot_t *tmpdqp;
                /*
                 * Now, see if somebody else put the dquot in the
                 * hashtable before us. This can happen because we didn't
                 * keep the hashchain lock. We don't have to worry about
                 * lock order between the two dquots here since dqp isn't
                 * on any findable lists yet.
                 */
                switch (xfs_qm_dqlookup(mp, id, h, &tmpdqp)) {
                case 0:
                case -1:
                        /*
                         * Duplicate found, either in cache or on its way out.
                         * Just throw away the new dquot and start over.
                         */
                        if (tmpdqp)
                                xfs_qm_dqput(tmpdqp);
                        mutex_unlock(&h->qh_lock);
                        xfs_qm_dqdestroy(dqp);
                        XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
                        goto restart;
                default:
                        break;
                }
        }

        /*
         * Put the dquot at the beginning of the hash-chain and mp's list
         * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
         */
        ASSERT(mutex_is_locked(&h->qh_lock));
        dqp->q_hash = h;
        list_add(&dqp->q_hashlist, &h->qh_list);
        h->qh_version++;

        /*
         * Attach this dquot to this filesystem's list of all dquots,
         * kept inside the mount structure in m_quotainfo field
         */
        mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);

        /*
         * We return a locked dquot to the caller, with a reference taken
         */
        xfs_dqlock(dqp);
        dqp->q_nrefs = 1;

        list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
        mp->m_quotainfo->qi_dquots++;
        mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
        mutex_unlock(&h->qh_lock);
dqret:
        ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
        trace_xfs_dqget_miss(dqp);
        *O_dqpp = dqp;
        return (0);
}

/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
        struct xfs_dquot        *dqp)
{
        struct xfs_dquot        *gdqp;

        ASSERT(dqp->q_nrefs > 0);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        trace_xfs_dqput(dqp);

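        /*
         * Dropping the last reference of a user dquot may loop back here
         * once (via "recurse") to also drop the reference it holds on its
         * group dquot hint.
         */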
recurse:
        if (--dqp->q_nrefs > 0) {
                xfs_dqunlock(dqp);
                return;
        }

        trace_xfs_dqput_free(dqp);

        mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
        if (list_empty(&dqp->q_freelist)) {
                list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
                xfs_Gqm->qm_dqfrlist_cnt++;
        }
        mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);

        /*
         * If we just added a udquot to the freelist, then we want to release
         * the gdquot reference that it (probably) has. Otherwise it'll keep
         * the gdquot from getting reclaimed.
         */
        gdqp = dqp->q_gdquot;
        if (gdqp) {
                xfs_dqlock(gdqp);
                dqp->q_gdquot = NULL;
        }
        xfs_dqunlock(dqp);

        /*
         * If we had a group quota hint, release it now.
         */
        if (gdqp) {
                dqp = gdqp;
                goto recurse;
        }
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
        xfs_dquot_t     *dqp)
{
        if (!dqp)
                return;

        trace_xfs_dqrele(dqp);

        xfs_dqlock(dqp);
        /*
         * We don't care to flush it if the dquot is dirty here.
         * That will create stutters that we want to avoid.
         * Instead we do a delayed write when we try to reclaim
         * a dirty dquot. Also xfs_sync will take part of the burden...
         */
        xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes..
 */
STATIC void
xfs_qm_dqflush_done(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip)
{
        xfs_dq_logitem_t        *qip = (struct xfs_dq_logitem *)lip;
        xfs_dquot_t             *dqp = qip->qli_dquot;
        struct xfs_ail          *ailp = lip->li_ailp;

        /*
         * We only want to pull the item from the AIL if its
         * location in the log has not changed since we started the flush.
         * Thus, we only bother if the dquot's lsn has
         * not changed. First we check the lsn outside the lock
         * since it's cheaper, and then we recheck while
         * holding the lock before removing the dquot from the AIL.
         */
        if ((lip->li_flags & XFS_LI_IN_AIL) &&
            lip->li_lsn == qip->qli_flush_lsn) {

                /* xfs_trans_ail_delete() drops the AIL lock. */
                spin_lock(&ailp->xa_lock);
                if (lip->li_lsn == qip->qli_flush_lsn)
                        xfs_trans_ail_delete(ailp, lip);
                else
                        spin_unlock(&ailp->xa_lock);
        }

        /*
         * Release the dq's flush lock since we're done with it.
         */
        xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
        xfs_dquot_t             *dqp,
        uint                    flags)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_buf          *bp;
        struct xfs_disk_dquot   *ddqp;
        int                     error;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(!completion_done(&dqp->q_flush));

        trace_xfs_dqflush(dqp);

        /*
         * If not dirty, or it's pinned and we are not supposed to block, nada.
         */
        if (!XFS_DQ_IS_DIRTY(dqp) ||
            ((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) {
                xfs_dqfunlock(dqp);
                return 0;
        }
        xfs_qm_dqunpin_wait(dqp);

        /*
         * This may have been unpinned because the filesystem is shutting
         * down forcibly. If that's the case we must not write this dquot
         * to disk, because the log record didn't make it to disk!
         */
        if (XFS_FORCED_SHUTDOWN(mp)) {
                dqp->dq_flags &= ~XFS_DQ_DIRTY;
                xfs_dqfunlock(dqp);
                return XFS_ERROR(EIO);
        }

        /*
         * Get the buffer containing the on-disk dquot
         */
        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
                                   mp->m_quotainfo->qi_dqchunklen, 0, &bp);
        if (error) {
                ASSERT(error != ENOENT);
                xfs_dqfunlock(dqp);
                return error;
        }

        /*
         * Calculate the location of the dquot inside the buffer.
         */
        ddqp = bp->b_addr + dqp->q_bufoffset;

        /*
         * A simple sanity check in case we got a corrupted dquot..
         */
        error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
                               XFS_QMOPT_DOWARN, "dqflush (incore copy)");
        if (error) {
                xfs_buf_relse(bp);
                xfs_dqfunlock(dqp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return XFS_ERROR(EIO);
        }

        /* This is the only portion of data that needs to persist */
        memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

        /*
         * Clear the dirty field and remember the flush lsn for later use.
         */
        dqp->dq_flags &= ~XFS_DQ_DIRTY;

        xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
                               &dqp->q_logitem.qli_item.li_lsn);

        /*
         * Attach an iodone routine so that we can remove this dquot from the
         * AIL and release the flush lock once the dquot is synced to disk.
         */
        xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
                              &dqp->q_logitem.qli_item);

        /*
         * If the buffer is pinned then push on the log so we won't
         * get stuck waiting in the write for too long.
         */
        if (xfs_buf_ispinned(bp)) {
                trace_xfs_dqflush_force(dqp);
                xfs_log_force(mp, 0);
        }

        if (flags & SYNC_WAIT)
                error = xfs_bwrite(bp);
        else
                xfs_buf_delwri_queue(bp);

        xfs_buf_relse(bp);

        trace_xfs_dqflush_done(dqp);

        /*
         * dqp is still locked, but caller is free to unlock it now.
         */
        return error;
}

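/*
 * Unlock the dquot and, if its log item has been initialized, notify the
 * AIL that the lock was released so that waiters pushing on the item can
 * retry.
 */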
void
xfs_dqunlock(
        xfs_dquot_t *dqp)
{
        xfs_dqunlock_nonotify(dqp);
        if (dqp->q_logitem.qli_dquot == dqp) {
                xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
                                        &dqp->q_logitem.qli_item);
        }
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
        xfs_dquot_t     *d1,
        xfs_dquot_t     *d2)
{
        if (d1 && d2) {
                ASSERT(d1 != d2);
                if (be32_to_cpu(d1->q_core.d_id) >
                    be32_to_cpu(d2->q_core.d_id)) {
                        mutex_lock(&d2->q_qlock);
                        mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
                } else {
                        mutex_lock(&d1->q_qlock);
                        mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
                }
        } else if (d1) {
                mutex_lock(&d1->q_qlock);
        } else if (d2) {
                mutex_lock(&d2->q_qlock);
        }
}

/*
 * Take a dquot out of the mount's dqlist as well as the hashlist.  This is
 * called via unmount as well as quotaoff, and the purge will always succeed.
 */
void
xfs_qm_dqpurge(
        struct xfs_dquot        *dqp)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_dqhash       *qh = dqp->q_hash;

        xfs_dqlock(dqp);

        /*
         * If we're turning off quotas, we have to make sure that, for
         * example, we don't delete quota disk blocks while dquots are
         * in the process of getting written to those disk blocks.
         * This dquot might well be on AIL, and we can't leave it there
         * if we're turning off quotas. Basically, we need this flush
         * lock, and are willing to block on it.
         */
        if (!xfs_dqflock_nowait(dqp)) {
                /*
                 * Block on the flush lock after nudging dquot buffer,
                 * if it is incore.
                 */
                xfs_dqflock_pushbuf_wait(dqp);
        }

        /*
         * If we are turning this type of quotas off, we don't care
         * about the dirty metadata sitting in this dquot. OTOH, if
         * we're unmounting, we do care, so we flush it and wait.
         */
        if (XFS_DQ_IS_DIRTY(dqp)) {
                int     error;

                /*
                 * We don't care about getting disk errors here. We need
                 * to purge this dquot anyway, so we go ahead regardless.
                 */
                error = xfs_qm_dqflush(dqp, SYNC_WAIT);
                if (error)
                        xfs_warn(mp, "%s: dquot %p flush failed",
                                 __func__, dqp);
                xfs_dqflock(dqp);
        }

        ASSERT(atomic_read(&dqp->q_pincount) == 0);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

        xfs_dqfunlock(dqp);
        xfs_dqunlock(dqp);

        mutex_lock(&qh->qh_lock);
        list_del_init(&dqp->q_hashlist);
        qh->qh_version++;
        mutex_unlock(&qh->qh_lock);

        mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
        list_del_init(&dqp->q_mplist);
        mp->m_quotainfo->qi_dqreclaims++;
        mp->m_quotainfo->qi_dquots--;
        mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);

        /*
         * We move dquots to the freelist as soon as their reference count
         * hits zero, so it really should be on the freelist here.
         */
        mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
        ASSERT(!list_empty(&dqp->q_freelist));
        list_del_init(&dqp->q_freelist);
        xfs_Gqm->qm_dqfrlist_cnt--;
        mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);

        xfs_qm_dqdestroy(dqp);
}

/*
 * Give the buffer a little push if it is incore and
 * wait on the flush lock.
 */
void
xfs_dqflock_pushbuf_wait(
        xfs_dquot_t     *dqp)
{
        xfs_mount_t     *mp = dqp->q_mount;
        xfs_buf_t       *bp;

        /*
         * Check to see if the dquot has been flushed delayed
         * write.  If so, grab its buffer and send it
         * out immediately.  We'll be able to acquire
         * the flush lock when the I/O completes.
         */
        bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
                        mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
        if (!bp)
                goto out_lock;

        if (XFS_BUF_ISDELAYWRITE(bp)) {
                if (xfs_buf_ispinned(bp))
                        xfs_log_force(mp, 0);
                xfs_buf_delwri_promote(bp);
                wake_up_process(bp->b_target->bt_task);
        }
        xfs_buf_relse(bp);
out_lock:
        xfs_dqflock(dqp);
}