Linux 3.12.5 - fs/xfs/xfs_trans_dquot.c
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"

STATIC void     xfs_trans_alloc_dqinfo(xfs_trans_t *);
/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        ASSERT(dqp->q_transp != tp);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(dqp->q_logitem.qli_dquot == dqp);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);

        /*
         * Initialize d_transp so we can later determine if this dquot is
         * associated with this transaction.
         */
        dqp->q_transp = tp;
}
/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        ASSERT(dqp->q_transp == tp);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        tp->t_flags |= XFS_TRANS_DIRTY;
        dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanking new transaction.
 */
void
xfs_trans_dup_dqinfo(
        xfs_trans_t     *otp,
        xfs_trans_t     *ntp)
{
        xfs_dqtrx_t     *oq, *nq;
        int             i, j;
        xfs_dqtrx_t     *oqa, *nqa;

        if (!otp->t_dqinfo)
                return;

        xfs_trans_alloc_dqinfo(ntp);

        /*
         * Because the quota blk reservation is carried forward,
         * it is also necessary to carry forward the DQ_DIRTY flag.
         */
        if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
                ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                oqa = otp->t_dqinfo->dqs[j];
                nqa = ntp->t_dqinfo->dqs[j];
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        if (oqa[i].qt_dquot == NULL)
                                break;

                        oq = &oqa[i];
                        nq = &nqa[i];

                        nq->qt_dquot = oq->qt_dquot;
                        nq->qt_bcount_delta = nq->qt_icount_delta = 0;
                        nq->qt_rtbcount_delta = 0;

                        /*
                         * Transfer whatever is left of the reservations.
                         */
                        nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
                        oq->qt_blk_res = oq->qt_blk_res_used;

                        nq->qt_rtblk_res = oq->qt_rtblk_res -
                                oq->qt_rtblk_res_used;
                        oq->qt_rtblk_res = oq->qt_rtblk_res_used;

                        nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
                        oq->qt_ino_res = oq->qt_ino_res_used;
                }
        }
}
/*
 * Wrap around mod_dquot to account for the user, group and project quotas
 * attached to the inode.
 */
void
xfs_trans_mod_dquot_byino(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            field,
        long            delta)
{
        xfs_mount_t     *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) ||
            !XFS_IS_QUOTA_ON(mp) ||
            xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
                return;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
        if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
        if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}
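
/*
 * Find the dqtrx slot in the transaction that tracks the given dquot: pick
 * the per-type array (user, group or project), then return either the slot
 * already holding this dquot or the first free one.  Returns NULL if the
 * dquot type is unrecognised or all slots are taken.
 */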
STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp)
{
        int                     i;
        struct xfs_dqtrx        *qa;

        if (XFS_QM_ISUDQ(dqp))
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
        else if (XFS_QM_ISGDQ(dqp))
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
        else if (XFS_QM_ISPDQ(dqp))
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
        else
                return NULL;

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                if (qa[i].qt_dquot == NULL ||
                    qa[i].qt_dquot == dqp)
                        return &qa[i];
        }

        return NULL;
}
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp,
        uint            field,
        long            delta)
{
        xfs_dqtrx_t     *qtrx;

        ASSERT(tp);
        ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
        qtrx = NULL;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);
        /*
         * Find either the first free slot or the slot that belongs
         * to this dquot.
         */
        qtrx = xfs_trans_get_dqtrx(tp, dqp);
        ASSERT(qtrx);
        if (qtrx->qt_dquot == NULL)
                qtrx->qt_dquot = dqp;

        switch (field) {
        /*
         * regular disk blk reservation
         */
        case XFS_TRANS_DQ_RES_BLKS:
                qtrx->qt_blk_res += (ulong)delta;
                break;

        /*
         * inode reservation
         */
        case XFS_TRANS_DQ_RES_INOS:
                qtrx->qt_ino_res += (ulong)delta;
                break;

        /*
         * disk blocks used.
         */
        case XFS_TRANS_DQ_BCOUNT:
                if (qtrx->qt_blk_res && delta > 0) {
                        qtrx->qt_blk_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
                }
                qtrx->qt_bcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELBCOUNT:
                qtrx->qt_delbcnt_delta += delta;
                break;

        /*
         * Inode Count
         */
        case XFS_TRANS_DQ_ICOUNT:
                if (qtrx->qt_ino_res && delta > 0) {
                        qtrx->qt_ino_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                }
                qtrx->qt_icount_delta += delta;
                break;

        /*
         * rtblk reservation
         */
        case XFS_TRANS_DQ_RES_RTBLKS:
                qtrx->qt_rtblk_res += (ulong)delta;
                break;

        /*
         * rtblk count
         */
        case XFS_TRANS_DQ_RTBCOUNT:
                if (qtrx->qt_rtblk_res && delta > 0) {
                        qtrx->qt_rtblk_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
                }
                qtrx->qt_rtbcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELRTBCOUNT:
                qtrx->qt_delrtb_delta += delta;
                break;

        default:
                ASSERT(0);
        }

        tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}
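
/*
 * Example (a sketch of a typical caller): after allocating nblks regular
 * blocks accounted to an inode, the usage is normally recorded through the
 * byino wrapper above rather than by calling xfs_trans_mod_dquot() directly:
 *
 *      xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, nblks);
 *
 * which fans the delta out to whichever of the inode's user, group and
 * project dquots are active.
 */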
/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that the
 * highest number of dquots of one type - usr, grp OR prj - involved in a
 * transaction is 2 so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
        xfs_trans_t     *tp,
        xfs_dqtrx_t     *q)
{
        ASSERT(q[0].qt_dquot != NULL);
        if (q[1].qt_dquot == NULL) {
                xfs_dqlock(q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
        } else {
                ASSERT(XFS_QM_TRANS_MAXDQS == 2);
                xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[1].qt_dquot);
        }
}
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
        struct xfs_trans        *tp)
{
        int                     i, j;
        struct xfs_dquot        *dqp;
        struct xfs_dqtrx        *qtrx, *qa;
        struct xfs_disk_dquot   *d;
        long                    totalbdelta;
        long                    totalrtbdelta;

        if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        ASSERT(tp->t_dqinfo);
        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];
                if (qa[0].qt_dquot == NULL)
                        continue;

                /*
                 * Lock all of the dquots and join them to the transaction.
                 */
                xfs_trans_dqlockedjoin(tp, qa);

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * The array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;

                        ASSERT(XFS_DQ_IS_LOCKED(dqp));
                        ASSERT(dqp->q_transp == tp);

                        /*
                         * adjust the actual number of blocks used
                         */
                        d = &dqp->q_core;

                        /*
                         * The issue here is - sometimes we don't make a blkquota
                         * reservation intentionally to be fair to users
                         * (when the amount is small). On the other hand,
                         * delayed allocs do make reservations, but that's
                         * outside of a transaction, so we have no
                         * idea how much was really reserved.
                         * So, here we've accumulated delayed allocation blks and
                         * non-delay blks. The assumption is that the
                         * delayed ones are always reserved (outside of a
                         * transaction), and the others may or may not have
                         * quota reservations.
                         */
                        totalbdelta = qtrx->qt_bcount_delta +
                                qtrx->qt_delbcnt_delta;
                        totalrtbdelta = qtrx->qt_rtbcount_delta +
                                qtrx->qt_delrtb_delta;
#ifdef DEBUG
                        if (totalbdelta < 0)
                                ASSERT(be64_to_cpu(d->d_bcount) >=
                                       -totalbdelta);

                        if (totalrtbdelta < 0)
                                ASSERT(be64_to_cpu(d->d_rtbcount) >=
                                       -totalrtbdelta);

                        if (qtrx->qt_icount_delta < 0)
                                ASSERT(be64_to_cpu(d->d_icount) >=
                                       -qtrx->qt_icount_delta);
#endif
                        if (totalbdelta)
                                be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

                        if (qtrx->qt_icount_delta)
                                be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

                        if (totalrtbdelta)
                                be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

                        /*
                         * Get any default limits in use.
                         * Start/reset the timer(s) if needed.
                         */
                        if (d->d_id) {
                                xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
                                xfs_qm_adjust_dqtimers(tp->t_mountp, d);
                        }

                        dqp->dq_flags |= XFS_DQ_DIRTY;
                        /*
                         * add this to the list of items to get logged
                         */
                        xfs_trans_log_dquot(tp, dqp);
                        /*
                         * Take off what's left of the original reservation.
                         * In case of delayed allocations, there's no
                         * reservation that a transaction structure knows of.
                         */
                        if (qtrx->qt_blk_res != 0) {
                                if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
                                        if (qtrx->qt_blk_res >
                                            qtrx->qt_blk_res_used)
                                                dqp->q_res_bcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_blk_res -
                                                         qtrx->qt_blk_res_used);
                                        else
                                                dqp->q_res_bcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_blk_res_used -
                                                         qtrx->qt_blk_res);
                                }
                        } else {
                                /*
                                 * These blks were never reserved, either inside
                                 * a transaction or outside one (in a delayed
                                 * allocation). Also, this isn't always a
                                 * negative number since we sometimes
                                 * deliberately skip quota reservations.
                                 */
                                if (qtrx->qt_bcount_delta) {
                                        dqp->q_res_bcount +=
                                                (xfs_qcnt_t)qtrx->qt_bcount_delta;
                                }
                        }
                        /*
                         * Adjust the RT reservation.
                         */
                        if (qtrx->qt_rtblk_res != 0) {
                                if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
                                        if (qtrx->qt_rtblk_res >
                                            qtrx->qt_rtblk_res_used)
                                                dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_rtblk_res -
                                                         qtrx->qt_rtblk_res_used);
                                        else
                                                dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_rtblk_res_used -
                                                         qtrx->qt_rtblk_res);
                                }
                        } else {
                                if (qtrx->qt_rtbcount_delta)
                                        dqp->q_res_rtbcount +=
                                                (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
                        }

                        /*
                         * Adjust the inode reservation.
                         */
                        if (qtrx->qt_ino_res != 0) {
                                ASSERT(qtrx->qt_ino_res >=
                                       qtrx->qt_ino_res_used);
                                if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
                                        dqp->q_res_icount -= (xfs_qcnt_t)
                                                (qtrx->qt_ino_res -
                                                 qtrx->qt_ino_res_used);
                        } else {
                                if (qtrx->qt_icount_delta)
                                        dqp->q_res_icount +=
                                                (xfs_qcnt_t)qtrx->qt_icount_delta;
                        }

                        ASSERT(dqp->q_res_bcount >=
                                be64_to_cpu(dqp->q_core.d_bcount));
                        ASSERT(dqp->q_res_icount >=
                                be64_to_cpu(dqp->q_core.d_icount));
                        ASSERT(dqp->q_res_rtbcount >=
                                be64_to_cpu(dqp->q_core.d_rtbcount));
                }
        }
}
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
        xfs_trans_t     *tp)
{
        int             i, j;
        xfs_dquot_t     *dqp;
        xfs_dqtrx_t     *qtrx, *qa;
        bool            locked;

        if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * We assume that the array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;
                        /*
                         * Unreserve the original reservation. We don't care
                         * about the number of blocks used field, or deltas.
                         * Also we don't bother to zero the fields.
                         */
                        locked = false;
                        if (qtrx->qt_blk_res) {
                                xfs_dqlock(dqp);
                                locked = true;
                                dqp->q_res_bcount -=
                                        (xfs_qcnt_t)qtrx->qt_blk_res;
                        }
                        if (qtrx->qt_ino_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_res_icount -=
                                        (xfs_qcnt_t)qtrx->qt_ino_res;
                        }

                        if (qtrx->qt_rtblk_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_res_rtbcount -=
                                        (xfs_qcnt_t)qtrx->qt_rtblk_res;
                        }
                        if (locked)
                                xfs_dqunlock(dqp);
                }
        }
}
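
/*
 * Send a quota warning to userspace over the quota netlink interface when a
 * limit is hit.  Project quotas are skipped here and surface as ENOSPC from
 * the reservation path instead.
 */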
STATIC void
xfs_quota_warn(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        int                     type)
{
        /* no warnings for project quotas - we just return ENOSPC later */
        if (dqp->dq_flags & XFS_DQ_PROJ)
                return;
        quota_send_warning(make_kqid(&init_user_ns,
                                     (dqp->dq_flags & XFS_DQ_USER) ?
                                     USRQUOTA : GRPQUOTA,
                                     be32_to_cpu(dqp->q_core.d_id)),
                           mp->m_super->s_dev, type);
}
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
        xfs_trans_t     *tp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *dqp,
        long            nblks,
        long            ninos,
        uint            flags)
{
        xfs_qcnt_t      hardlimit;
        xfs_qcnt_t      softlimit;
        time_t          timer;
        xfs_qwarncnt_t  warns;
        xfs_qwarncnt_t  warnlimit;
        xfs_qcnt_t      total_count;
        xfs_qcnt_t      *resbcountp;
        xfs_quotainfo_t *q = mp->m_quotainfo;

        xfs_dqlock(dqp);

        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
                if (!hardlimit)
                        hardlimit = q->qi_bhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
                if (!softlimit)
                        softlimit = q->qi_bsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_btimer);
                warns = be16_to_cpu(dqp->q_core.d_bwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
                resbcountp = &dqp->q_res_bcount;
        } else {
                ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
                hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
                if (!hardlimit)
                        hardlimit = q->qi_rtbhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
                if (!softlimit)
                        softlimit = q->qi_rtbsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
                warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
                resbcountp = &dqp->q_res_rtbcount;
        }

        if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
            dqp->q_core.d_id &&
            ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
             (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
             (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
                if (nblks > 0) {
                        /*
                         * dquot is locked already. See if we'd go over the
                         * hardlimit or exceed the timelimit if we allocate
                         * nblks.
                         */
                        total_count = *resbcountp + nblks;
                        if (hardlimit && total_count > hardlimit) {
                                xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
                                goto error_return;
                        }
                        if (softlimit && total_count > softlimit) {
                                if ((timer != 0 && get_seconds() > timer) ||
                                    (warns != 0 && warns >= warnlimit)) {
                                        xfs_quota_warn(mp, dqp,
                                                       QUOTA_NL_BSOFTLONGWARN);
                                        goto error_return;
                                }

                                xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
                        }
                }
                if (ninos > 0) {
                        total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
                        timer = be32_to_cpu(dqp->q_core.d_itimer);
                        warns = be16_to_cpu(dqp->q_core.d_iwarns);
                        warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
                        hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
                        if (!hardlimit)
                                hardlimit = q->qi_ihardlimit;
                        softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
                        if (!softlimit)
                                softlimit = q->qi_isoftlimit;

                        if (hardlimit && total_count > hardlimit) {
                                xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
                                goto error_return;
                        }
                        if (softlimit && total_count > softlimit) {
                                if ((timer != 0 && get_seconds() > timer) ||
                                    (warns != 0 && warns >= warnlimit)) {
                                        xfs_quota_warn(mp, dqp,
                                                       QUOTA_NL_ISOFTLONGWARN);
                                        goto error_return;
                                }
                                xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
                        }
                }
        }

        /*
         * Change the reservation, but not the actual usage.
         * Note that q_res_bcount = q_core.d_bcount + resv
         */
        (*resbcountp) += (xfs_qcnt_t)nblks;
        if (ninos != 0)
                dqp->q_res_icount += (xfs_qcnt_t)ninos;

        /*
         * note the reservation amt in the trans struct too,
         * so that the transaction knows how much was reserved by
         * it against this particular dquot.
         * We don't do this when we are reserving for a delayed allocation,
         * because we don't have the luxury of a transaction envelope then.
         */
        if (tp) {
                ASSERT(tp->t_dqinfo);
                ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
                if (nblks != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            flags & XFS_QMOPT_RESBLK_MASK,
                                            nblks);
                if (ninos != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            XFS_TRANS_DQ_RES_INOS,
                                            ninos);
        }
        ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
        ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
        ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

        xfs_dqunlock(dqp);
        return 0;

error_return:
        xfs_dqunlock(dqp);
        if (flags & XFS_QMOPT_ENOSPC)
                return ENOSPC;
        return EDQUOT;
}
/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *         XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *         XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *         XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
        struct xfs_trans        *tp,
        struct xfs_mount        *mp,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp,
        long                    nblks,
        long                    ninos,
        uint                    flags)
{
        int             error;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        if (tp && tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

        if (udqp) {
                error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
                                        (flags & ~XFS_QMOPT_ENOSPC));
                if (error)
                        return error;
        }

        if (gdqp) {
                error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_usr;
        }

        if (pdqp) {
                error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_grp;
        }

        /*
         * Didn't change anything critical, so, no need to log
         */
        return 0;

unwind_grp:
        flags |= XFS_QMOPT_FORCE_RES;
        if (gdqp)
                xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
        flags |= XFS_QMOPT_FORCE_RES;
        if (udqp)
                xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
        return error;
}
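
/*
 * Example (a sketch of a typical reservation): reserving nblks regular blocks
 * and one inode against the dquots attached to an inode could look like:
 *
 *      error = xfs_trans_reserve_quota_bydquots(tp, mp,
 *                      ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
 *                      nblks, 1, XFS_TRANS_DQ_RES_BLKS);
 *
 * which is essentially what xfs_trans_reserve_quota_nblks() below does on the
 * caller's behalf.
 */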
/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        long                    nblks,
        long                    ninos,
        uint                    flags)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
        if (XFS_IS_PQUOTA_ON(mp))
                flags |= XFS_QMOPT_ENOSPC;

        ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_RTBLKS ||
               (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_BLKS);

        /*
         * Reserve nblks against these dquots, with trans as the mediator.
         */
        return xfs_trans_reserve_quota_bydquots(tp, mp,
                                                ip->i_udquot, ip->i_gdquot,
                                                ip->i_pdquot,
                                                nblks, ninos, flags);
}
/*
 * This routine is called to allocate a quotaoff log item.
 */
xfs_qoff_logitem_t *
xfs_trans_get_qoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *startqoff,
        uint                    flags)
{
        xfs_qoff_logitem_t      *q;

        ASSERT(tp != NULL);

        q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
        ASSERT(q != NULL);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &q->qql_item);
        return q;
}

/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *qlp)
{
        tp->t_flags |= XFS_TRANS_DIRTY;
        qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
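
/*
 * Allocate the per-transaction dquot bookkeeping structure (t_dqinfo) from
 * its dedicated zone.
 */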
STATIC void
xfs_trans_alloc_dqinfo(
        xfs_trans_t     *tp)
{
        tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
}
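
/*
 * Free the per-transaction dquot bookkeeping structure, if it was allocated.
 */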
void
xfs_trans_free_dqinfo(
        xfs_trans_t     *tp)
{
        if (!tp->t_dqinfo)
                return;
        kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
        tp->t_dqinfo = NULL;
}