fs/xfs/quota/xfs_trans_dquot.c (from linux-2.6/verdex.git)
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"

#include "xfs_qm.h"

STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
    xfs_trans_t *tp,
    xfs_dquot_t *dqp)
{
    xfs_dq_logitem_t *lp;

    ASSERT(!XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
    ASSERT(XFS_DQ_IS_LOCKED(dqp));
    ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));
    lp = &dqp->q_logitem;

    /*
     * Get a log_item_desc to point at the new item.
     */
    (void) xfs_trans_add_item(tp, (xfs_log_item_t *)(lp));

    /*
     * Initialize q_transp so we can later determine if this dquot is
     * associated with this transaction.
     */
    dqp->q_transp = tp;
}
/*
 * This is called to mark the dquot as needing to be logged when the
 * transaction is committed.  The dquot must already be associated with
 * the given transaction.
 * Note that it marks the entire transaction as dirty.  In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty.  However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim.  Hence the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
    xfs_trans_t *tp,
    xfs_dquot_t *dqp)
{
    xfs_log_item_desc_t *lidp;

    ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
    ASSERT(XFS_DQ_IS_LOCKED(dqp));

    lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)(&dqp->q_logitem));
    ASSERT(lidp != NULL);

    tp->t_flags |= XFS_TRANS_DIRTY;
    lidp->lid_flags |= XFS_LID_DIRTY;
}
/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction.
 */
STATIC void
xfs_trans_dup_dqinfo(
    xfs_trans_t *otp,
    xfs_trans_t *ntp)
{
    xfs_dqtrx_t *oq, *nq;
    int i, j;
    xfs_dqtrx_t *oqa, *nqa;

    if (!otp->t_dqinfo)
        return;

    xfs_trans_alloc_dqinfo(ntp);
    oqa = otp->t_dqinfo->dqa_usrdquots;
    nqa = ntp->t_dqinfo->dqa_usrdquots;

    /*
     * Because the quota blk reservation is carried forward,
     * it is also necessary to carry forward the DQ_DIRTY flag.
     */
    if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
        ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

    for (j = 0; j < 2; j++) {
        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
            if (oqa[i].qt_dquot == NULL)
                break;
            oq = &oqa[i];
            nq = &nqa[i];

            nq->qt_dquot = oq->qt_dquot;
            nq->qt_bcount_delta = nq->qt_icount_delta = 0;
            nq->qt_rtbcount_delta = 0;

            /*
             * Transfer whatever is left of the reservations.
             */
            nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
            oq->qt_blk_res = oq->qt_blk_res_used;

            nq->qt_rtblk_res = oq->qt_rtblk_res -
                oq->qt_rtblk_res_used;
            oq->qt_rtblk_res = oq->qt_rtblk_res_used;

            nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
            oq->qt_ino_res = oq->qt_ino_res_used;
        }
        oqa = otp->t_dqinfo->dqa_grpdquots;
        nqa = ntp->t_dqinfo->dqa_grpdquots;
    }
}
/*
 * Wrap around mod_dquot to account for both user and group quotas.
 */
void
xfs_trans_mod_dquot_byino(
    xfs_trans_t *tp,
    xfs_inode_t *ip,
    uint field,
    long delta)
{
    xfs_mount_t *mp;

    ASSERT(tp);
    mp = tp->t_mountp;

    if (!XFS_IS_QUOTA_ON(mp) ||
        ip->i_ino == mp->m_sb.sb_uquotino ||
        ip->i_ino == mp->m_sb.sb_gquotino)
        return;

    if (tp->t_dqinfo == NULL)
        xfs_trans_alloc_dqinfo(tp);

    if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) {
        (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
    }
    if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) {
        (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
    }
}
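
/*
 * Return the slot in the transaction's dquot accounting array that already
 * tracks this dquot, or the first empty slot if the dquot has not been seen
 * in this transaction yet.  Returns NULL only if the array is full.
 */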
STATIC xfs_dqtrx_t *
xfs_trans_get_dqtrx(
    xfs_trans_t *tp,
    xfs_dquot_t *dqp)
{
    int i;
    xfs_dqtrx_t *qa;

    for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
        qa = XFS_QM_DQP_TO_DQACCT(tp, dqp);

        if (qa[i].qt_dquot == NULL ||
            qa[i].qt_dquot == dqp) {
            return (&qa[i]);
        }
    }

    return (NULL);
}
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
    xfs_trans_t *tp,
    xfs_dquot_t *dqp,
    uint field,
    long delta)
{
    xfs_dqtrx_t *qtrx;

    ASSERT(tp);
    qtrx = NULL;

    if (tp->t_dqinfo == NULL)
        xfs_trans_alloc_dqinfo(tp);
    /*
     * Find either the first free slot or the slot that belongs
     * to this dquot.
     */
    qtrx = xfs_trans_get_dqtrx(tp, dqp);
    ASSERT(qtrx);
    if (qtrx->qt_dquot == NULL)
        qtrx->qt_dquot = dqp;

    switch (field) {
    /*
     * regular disk blk reservation
     */
    case XFS_TRANS_DQ_RES_BLKS:
        qtrx->qt_blk_res += (ulong)delta;
        break;

    /*
     * inode reservation
     */
    case XFS_TRANS_DQ_RES_INOS:
        qtrx->qt_ino_res += (ulong)delta;
        break;

    /*
     * disk blocks used
     */
    case XFS_TRANS_DQ_BCOUNT:
        if (qtrx->qt_blk_res && delta > 0) {
            qtrx->qt_blk_res_used += (ulong)delta;
            ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
        }
        qtrx->qt_bcount_delta += delta;
        break;

    case XFS_TRANS_DQ_DELBCOUNT:
        qtrx->qt_delbcnt_delta += delta;
        break;

    /*
     * inode count
     */
    case XFS_TRANS_DQ_ICOUNT:
        if (qtrx->qt_ino_res && delta > 0) {
            qtrx->qt_ino_res_used += (ulong)delta;
            ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
        }
        qtrx->qt_icount_delta += delta;
        break;

    /*
     * rtblk reservation
     */
    case XFS_TRANS_DQ_RES_RTBLKS:
        qtrx->qt_rtblk_res += (ulong)delta;
        break;

    /*
     * rtblk count
     */
    case XFS_TRANS_DQ_RTBCOUNT:
        if (qtrx->qt_rtblk_res && delta > 0) {
            qtrx->qt_rtblk_res_used += (ulong)delta;
            ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
        }
        qtrx->qt_rtbcount_delta += delta;
        break;

    case XFS_TRANS_DQ_DELRTBCOUNT:
        qtrx->qt_delrtb_delta += delta;
        break;

    default:
        ASSERT(0);
    }
    tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}
/*
 * Given an array of dqtrx structures, lock all the associated dquots and
 * join them to the transaction, provided they have been modified.
 * We know that the highest number of dquots of one type (usr OR grp)
 * involved in a transaction is 2, and that usr and grp combined is at
 * most 3, so we don't attempt to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
    xfs_trans_t *tp,
    xfs_dqtrx_t *q)
{
    ASSERT(q[0].qt_dquot != NULL);
    if (q[1].qt_dquot == NULL) {
        xfs_dqlock(q[0].qt_dquot);
        xfs_trans_dqjoin(tp, q[0].qt_dquot);
    } else {
        ASSERT(XFS_QM_TRANS_MAXDQS == 2);
        xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
        xfs_trans_dqjoin(tp, q[0].qt_dquot);
        xfs_trans_dqjoin(tp, q[1].qt_dquot);
    }
}
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
    xfs_trans_t *tp)
{
    int i, j;
    xfs_dquot_t *dqp;
    xfs_dqtrx_t *qtrx, *qa;
    xfs_disk_dquot_t *d;
    long totalbdelta;
    long totalrtbdelta;

    if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
        return;

    ASSERT(tp->t_dqinfo);
    qa = tp->t_dqinfo->dqa_usrdquots;
    for (j = 0; j < 2; j++) {
        if (qa[0].qt_dquot == NULL) {
            qa = tp->t_dqinfo->dqa_grpdquots;
            continue;
        }

        /*
         * Lock all of the dquots and join them to the transaction.
         */
        xfs_trans_dqlockedjoin(tp, qa);

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
            qtrx = &qa[i];
            /*
             * The array of dquots is filled
             * sequentially, not sparsely.
             */
            if ((dqp = qtrx->qt_dquot) == NULL)
                break;

            ASSERT(XFS_DQ_IS_LOCKED(dqp));
            ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));

            /*
             * adjust the actual number of blocks used
             */
            d = &dqp->q_core;

            /*
             * The issue here is - sometimes we don't make a blkquota
             * reservation intentionally to be fair to users
             * (when the amount is small).  On the other hand,
             * delayed allocs do make reservations, but that's
             * outside of a transaction, so we have no
             * idea how much was really reserved.
             * So, here we've accumulated delayed allocation blks and
             * non-delay blks.  The assumption is that the
             * delayed ones are always reserved (outside of a
             * transaction), and the others may or may not have
             * quota reservations.
             */
            totalbdelta = qtrx->qt_bcount_delta +
                qtrx->qt_delbcnt_delta;
            totalrtbdelta = qtrx->qt_rtbcount_delta +
                qtrx->qt_delrtb_delta;
#ifdef QUOTADEBUG
            if (totalbdelta < 0)
                ASSERT(INT_GET(d->d_bcount, ARCH_CONVERT) >=
                       (xfs_qcnt_t) -totalbdelta);

            if (totalrtbdelta < 0)
                ASSERT(INT_GET(d->d_rtbcount, ARCH_CONVERT) >=
                       (xfs_qcnt_t) -totalrtbdelta);

            if (qtrx->qt_icount_delta < 0)
                ASSERT(INT_GET(d->d_icount, ARCH_CONVERT) >=
                       (xfs_qcnt_t) -qtrx->qt_icount_delta);
#endif
            if (totalbdelta)
                INT_MOD(d->d_bcount, ARCH_CONVERT,
                        (xfs_qcnt_t)totalbdelta);

            if (qtrx->qt_icount_delta)
                INT_MOD(d->d_icount, ARCH_CONVERT,
                        (xfs_qcnt_t)qtrx->qt_icount_delta);

            if (totalrtbdelta)
                INT_MOD(d->d_rtbcount, ARCH_CONVERT,
                        (xfs_qcnt_t)totalrtbdelta);

            /*
             * Get any default limits in use.
             * Start/reset the timer(s) if needed.
             */
            if (d->d_id) {
                xfs_qm_adjust_dqlimits(tp->t_mountp, d);
                xfs_qm_adjust_dqtimers(tp->t_mountp, d);
            }

            dqp->dq_flags |= XFS_DQ_DIRTY;
            /*
             * add this to the list of items to get logged
             */
            xfs_trans_log_dquot(tp, dqp);
            /*
             * Take off what's left of the original reservation.
             * In case of delayed allocations, there's no
             * reservation that a transaction structure knows of.
             */
            if (qtrx->qt_blk_res != 0) {
                if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
                    if (qtrx->qt_blk_res > qtrx->qt_blk_res_used)
                        dqp->q_res_bcount -= (xfs_qcnt_t)
                            (qtrx->qt_blk_res -
                             qtrx->qt_blk_res_used);
                    else
                        dqp->q_res_bcount -= (xfs_qcnt_t)
                            (qtrx->qt_blk_res_used -
                             qtrx->qt_blk_res);
                }
            } else {
                /*
                 * These blks were never reserved, either inside
                 * a transaction or outside one (in a delayed
                 * allocation).  Also, this isn't always a
                 * negative number since we sometimes
                 * deliberately skip quota reservations.
                 */
                if (qtrx->qt_bcount_delta) {
                    dqp->q_res_bcount +=
                        (xfs_qcnt_t)qtrx->qt_bcount_delta;
                }
            }
            /*
             * Adjust the RT reservation.
             */
            if (qtrx->qt_rtblk_res != 0) {
                if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
                    if (qtrx->qt_rtblk_res > qtrx->qt_rtblk_res_used)
                        dqp->q_res_rtbcount -= (xfs_qcnt_t)
                            (qtrx->qt_rtblk_res -
                             qtrx->qt_rtblk_res_used);
                    else
                        dqp->q_res_rtbcount -= (xfs_qcnt_t)
                            (qtrx->qt_rtblk_res_used -
                             qtrx->qt_rtblk_res);
                }
            } else {
                if (qtrx->qt_rtbcount_delta)
                    dqp->q_res_rtbcount +=
                        (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
            }

            /*
             * Adjust the inode reservation.
             */
            if (qtrx->qt_ino_res != 0) {
                ASSERT(qtrx->qt_ino_res >=
                       qtrx->qt_ino_res_used);
                if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
                    dqp->q_res_icount -= (xfs_qcnt_t)
                        (qtrx->qt_ino_res -
                         qtrx->qt_ino_res_used);
            } else {
                if (qtrx->qt_icount_delta)
                    dqp->q_res_icount +=
                        (xfs_qcnt_t)qtrx->qt_icount_delta;
            }

#ifdef QUOTADEBUG
            if (qtrx->qt_rtblk_res != 0)
                cmn_err(CE_DEBUG, "RT res %d for 0x%p\n",
                        (int) qtrx->qt_rtblk_res, dqp);
#endif
            ASSERT(dqp->q_res_bcount >=
                   INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
            ASSERT(dqp->q_res_icount >=
                   INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
            ASSERT(dqp->q_res_rtbcount >=
                   INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
        }
        /*
         * Do the group quotas next
         */
        qa = tp->t_dqinfo->dqa_grpdquots;
    }
}
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted.  If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
STATIC void
xfs_trans_unreserve_and_mod_dquots(
    xfs_trans_t *tp)
{
    int i, j;
    xfs_dquot_t *dqp;
    xfs_dqtrx_t *qtrx, *qa;
    boolean_t locked;

    if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
        return;

    qa = tp->t_dqinfo->dqa_usrdquots;

    for (j = 0; j < 2; j++) {
        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
            qtrx = &qa[i];
            /*
             * We assume that the array of dquots is filled
             * sequentially, not sparsely.
             */
            if ((dqp = qtrx->qt_dquot) == NULL)
                break;
            /*
             * Unreserve the original reservation.  We don't care
             * about the number of blocks used field, or deltas.
             * Also we don't bother to zero the fields.
             */
            locked = B_FALSE;
            if (qtrx->qt_blk_res) {
                xfs_dqlock(dqp);
                locked = B_TRUE;
                dqp->q_res_bcount -=
                    (xfs_qcnt_t)qtrx->qt_blk_res;
            }
            if (qtrx->qt_ino_res) {
                if (!locked) {
                    xfs_dqlock(dqp);
                    locked = B_TRUE;
                }
                dqp->q_res_icount -=
                    (xfs_qcnt_t)qtrx->qt_ino_res;
            }

            if (qtrx->qt_rtblk_res) {
                if (!locked) {
                    xfs_dqlock(dqp);
                    locked = B_TRUE;
                }
                dqp->q_res_rtbcount -=
                    (xfs_qcnt_t)qtrx->qt_rtblk_res;
            }
            if (locked)
                xfs_dqunlock(dqp);
        }
        qa = tp->t_dqinfo->dqa_grpdquots;
    }
}
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in the XFS_QMOPT_FORCE_RES flag skips the quota check.
 * Returns EDQUOT if quota is exceeded.
 */
STATIC int
xfs_trans_dqresv(
    xfs_trans_t *tp,
    xfs_mount_t *mp,
    xfs_dquot_t *dqp,
    long nblks,
    long ninos,
    uint flags)
{
    int error;
    xfs_qcnt_t hardlimit;
    xfs_qcnt_t softlimit;
    time_t btimer;
    xfs_qcnt_t *resbcountp;
    xfs_quotainfo_t *q = mp->m_quotainfo;

    if (!(flags & XFS_QMOPT_DQLOCK)) {
        xfs_dqlock(dqp);
    }
    ASSERT(XFS_DQ_IS_LOCKED(dqp));
    if (flags & XFS_TRANS_DQ_RES_BLKS) {
        hardlimit = INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT);
        if (!hardlimit)
            hardlimit = q->qi_bhardlimit;
        softlimit = INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT);
        if (!softlimit)
            softlimit = q->qi_bsoftlimit;
        btimer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT);
        resbcountp = &dqp->q_res_bcount;
    } else {
        ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
        hardlimit = INT_GET(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT);
        if (!hardlimit)
            hardlimit = q->qi_rtbhardlimit;
        softlimit = INT_GET(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT);
        if (!softlimit)
            softlimit = q->qi_rtbsoftlimit;
        btimer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT);
        resbcountp = &dqp->q_res_rtbcount;
    }
    error = 0;

    if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
        dqp->q_core.d_id &&
        XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
        cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
                " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
#endif
        if (nblks > 0) {
            /*
             * dquot is locked already.  See if we'd go over the
             * hardlimit or exceed the timelimit if we allocate
             * nblks.
             */
            if (hardlimit > 0ULL &&
                (hardlimit <= nblks + *resbcountp)) {
                error = EDQUOT;
                goto error_return;
            }

            if (softlimit > 0ULL &&
                (softlimit <= nblks + *resbcountp)) {
                /*
                 * If the timer or warnings have expired,
                 * return EDQUOT.
                 */
                if ((btimer != 0 && get_seconds() > btimer) ||
                    (dqp->q_core.d_bwarns &&
                     INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) >=
                     XFS_QI_BWARNLIMIT(dqp->q_mount))) {
                    error = EDQUOT;
                    goto error_return;
                }
            }
        }
        if (ninos > 0) {
            hardlimit = INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT);
            if (!hardlimit)
                hardlimit = q->qi_ihardlimit;
            softlimit = INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT);
            if (!softlimit)
                softlimit = q->qi_isoftlimit;
            if (hardlimit > 0ULL &&
                INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= hardlimit) {
                error = EDQUOT;
                goto error_return;
            } else if (softlimit > 0ULL &&
                       INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= softlimit) {
                /*
                 * If the timer or warnings have expired,
                 * return EDQUOT.
                 */
                if ((dqp->q_core.d_itimer &&
                     get_seconds() > INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT)) ||
                    (dqp->q_core.d_iwarns &&
                     INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) >=
                     XFS_QI_IWARNLIMIT(dqp->q_mount))) {
                    error = EDQUOT;
                    goto error_return;
                }
            }
        }
    }

    /*
     * Change the reservation, but not the actual usage.
     * Note that q_res_bcount = q_core.d_bcount + resv
     */
    (*resbcountp) += (xfs_qcnt_t)nblks;
    if (ninos != 0)
        dqp->q_res_icount += (xfs_qcnt_t)ninos;

    /*
     * note the reservation amt in the trans struct too,
     * so that the transaction knows how much was reserved by
     * it against this particular dquot.
     * We don't do this when we are reserving for a delayed allocation,
     * because we don't have the luxury of a transaction envelope then.
     */
    if (tp) {
        ASSERT(tp->t_dqinfo);
        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
        if (nblks != 0)
            xfs_trans_mod_dquot(tp, dqp,
                                flags & XFS_QMOPT_RESBLK_MASK,
                                nblks);
        if (ninos != 0)
            xfs_trans_mod_dquot(tp, dqp,
                                XFS_TRANS_DQ_RES_INOS,
                                ninos);
    }
    ASSERT(dqp->q_res_bcount >= INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
    ASSERT(dqp->q_res_rtbcount >= INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
    ASSERT(dqp->q_res_icount >= INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));

error_return:
    if (!(flags & XFS_QMOPT_DQLOCK)) {
        xfs_dqunlock(dqp);
    }
    return (error);
}
/*
 * Given a dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against both the usr and
 * grp quotas is important, because this follows a both-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_DQLOCK indicates the dquot(s) need to be locked.
 *         XFS_QMOPT_FORCE_RES evades limit enforcement.  Used by chown.
 *         XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks.
 *         XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks.
 * dquots are unlocked on return, if they were not locked by the caller.
 */
int
xfs_trans_reserve_quota_bydquots(
    xfs_trans_t *tp,
    xfs_mount_t *mp,
    xfs_dquot_t *udqp,
    xfs_dquot_t *gdqp,
    long nblks,
    long ninos,
    uint flags)
{
    int resvd;

    if (!XFS_IS_QUOTA_ON(mp))
        return (0);

    if (tp && tp->t_dqinfo == NULL)
        xfs_trans_alloc_dqinfo(tp);

    ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
    resvd = 0;

    if (udqp) {
        if (xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags))
            return (EDQUOT);
        resvd = 1;
    }

    if (gdqp) {
        if (xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags)) {
            /*
             * can't do it, so back out the previous reservation
             */
            if (resvd) {
                flags |= XFS_QMOPT_FORCE_RES;
                xfs_trans_dqresv(tp, mp, udqp,
                                 -nblks, -ninos, flags);
            }
            return (EDQUOT);
        }
    }

    /*
     * Didn't change anything critical, so, no need to log.
     */
    return (0);
}
/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 *
 * Returns 0 on success, EDQUOT or other errors otherwise.
 */
STATIC int
xfs_trans_reserve_quota_nblks(
    xfs_trans_t *tp,
    xfs_mount_t *mp,
    xfs_inode_t *ip,
    long nblks,
    long ninos,
    uint type)
{
    int error;

    if (!XFS_IS_QUOTA_ON(mp))
        return (0);

    ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
    ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);

    ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
    ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
    ASSERT((type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_RTBLKS ||
           (type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_BLKS);

    /*
     * Reserve nblks against these dquots, with trans as the mediator.
     */
    error = xfs_trans_reserve_quota_bydquots(tp, mp,
                                             ip->i_udquot, ip->i_gdquot,
                                             nblks, ninos,
                                             type);
    return (error);
}
/*
 * This routine is called to allocate a quotaoff log item.
 */
xfs_qoff_logitem_t *
xfs_trans_get_qoff_item(
    xfs_trans_t *tp,
    xfs_qoff_logitem_t *startqoff,
    uint flags)
{
    xfs_qoff_logitem_t *q;

    ASSERT(tp != NULL);

    q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
    ASSERT(q != NULL);

    /*
     * Get a log_item_desc to point at the new item.
     */
    (void) xfs_trans_add_item(tp, (xfs_log_item_t *)q);

    return (q);
}
/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
    xfs_trans_t *tp,
    xfs_qoff_logitem_t *qlp)
{
    xfs_log_item_desc_t *lidp;

    lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp);
    ASSERT(lidp != NULL);

    tp->t_flags |= XFS_TRANS_DIRTY;
    lidp->lid_flags |= XFS_LID_DIRTY;
}
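
/*
 * Allocate the per-transaction quota accounting structure (t_dqinfo) from
 * the dqtrx zone.  Callers allocate it lazily, on the first quota
 * modification or reservation made within the transaction.
 */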
STATIC void
xfs_trans_alloc_dqinfo(
    xfs_trans_t *tp)
{
    tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
}
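
/*
 * Free the per-transaction quota accounting structure, if one was ever
 * allocated for this transaction.
 */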
STATIC void
xfs_trans_free_dqinfo(
    xfs_trans_t *tp)
{
    if (!tp->t_dqinfo)
        return;
    kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);
    tp->t_dqinfo = NULL;
}
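
/*
 * Operations vector through which the higher-level transaction code calls
 * into the quota routines defined above.
 */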
xfs_dqtrxops_t xfs_trans_dquot_ops = {
    .qo_dup_dqinfo               = xfs_trans_dup_dqinfo,
    .qo_free_dqinfo              = xfs_trans_free_dqinfo,
    .qo_mod_dquot_byino          = xfs_trans_mod_dquot_byino,
    .qo_apply_dquot_deltas       = xfs_trans_apply_dquot_deltas,
    .qo_reserve_quota_nblks      = xfs_trans_reserve_quota_nblks,
    .qo_reserve_quota_bydquots   = xfs_trans_reserve_quota_bydquots,
    .qo_unreserve_and_mod_dquots = xfs_trans_unreserve_and_mod_dquots,
};