 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
	ASSERT(dqp->q_transp != tp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);
	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
	/*
	 * Initialize q_transp so we can later determine if this dquot is
	 * associated with this transaction.
	 */
/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
	ASSERT(dqp->q_transp == tp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	tp->t_flags |= XFS_TRANS_DIRTY;
	dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
	xfs_dqtrx_t	*oq, *nq;
	xfs_dqtrx_t	*oqa, *nqa;

	xfs_trans_alloc_dqinfo(ntp);

	oqa = otp->t_dqinfo->dqa_usrdquots;
	nqa = ntp->t_dqinfo->dqa_usrdquots;
	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;
			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
			oq->qt_blk_res = oq->qt_blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		oqa = otp->t_dqinfo->dqa_grpdquots;
		nqa = ntp->t_dqinfo->dqa_grpdquots;
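/*
 * Illustrative sketch (not from the original source): a minimal userspace
 * model of the carry-forward rule above, under the assumption that only the
 * unused portion of each reservation moves to the new transaction while the
 * old transaction keeps exactly what it already consumed.  The struct and
 * function names are hypothetical stand-ins for xfs_dqtrx_t, and the whole
 * block is compiled out so the file is unaffected.
 */
#if 0
#include <assert.h>

struct demo_dqtrx {			/* hypothetical stand-in for xfs_dqtrx_t */
	unsigned long	blk_res;	/* blocks reserved by the transaction */
	unsigned long	blk_res_used;	/* blocks already used out of that reservation */
};

static void
demo_carry_forward(struct demo_dqtrx *oq, struct demo_dqtrx *nq)
{
	nq->blk_res = oq->blk_res - oq->blk_res_used;	/* unused part moves forward */
	oq->blk_res = oq->blk_res_used;			/* old trans keeps only what it used */
}

static void
demo_carry_forward_example(void)
{
	struct demo_dqtrx oq = { .blk_res = 10, .blk_res_used = 4 };
	struct demo_dqtrx nq = { 0, 0 };

	demo_carry_forward(&oq, &nq);
	assert(nq.blk_res == 6 && oq.blk_res == 4);	/* 6 unused blocks carried forward */
}
#endif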
/*
 * Wrap around mod_dquot to account for both user and group quotas.
 */
xfs_trans_mod_dquot_byino(
	xfs_mount_t	*mp = tp->t_mountp;
	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    ip->i_ino == mp->m_sb.sb_uquotino ||
	    ip->i_ino == mp->m_sb.sb_gquotino)
		return;
	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
	qa = XFS_QM_ISUDQ(dqp) ?
		tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots;

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;
	/*
	 * regular disk blk reservation
	 */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += (ulong)delta;
		break;

	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += (ulong)delta;
		break;

	case XFS_TRANS_DQ_BCOUNT:
		if (qtrx->qt_blk_res && delta > 0) {
			qtrx->qt_blk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
		}
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += (ulong)delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += (ulong)delta;
		break;

	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;
	}

	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
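/*
 * Illustrative sketch (not from the original source): a compact userspace
 * model of the accounting rule used by the switch above, on the simplified
 * view that a positive block-count delta is charged against a held
 * reservation when one exists and is otherwise only accumulated as a raw
 * delta.  Names are hypothetical; the block is compiled out.
 */
#if 0
#include <assert.h>

struct demo_qtrx {
	unsigned long	blk_res;	/* reservation taken by this transaction */
	unsigned long	blk_res_used;	/* portion of that reservation consumed */
	long		bcount_delta;	/* net change to the on-disk block count */
};

static void
demo_mod_bcount(struct demo_qtrx *q, long delta)
{
	if (q->blk_res && delta > 0) {
		q->blk_res_used += (unsigned long)delta;
		assert(q->blk_res >= q->blk_res_used);
	}
	q->bcount_delta += delta;	/* the usage change is always recorded */
}

static void
demo_mod_bcount_example(void)
{
	struct demo_qtrx q = { .blk_res = 8, .blk_res_used = 0, .bcount_delta = 0 };

	demo_mod_bcount(&q, 5);		/* allocation: eats into the reservation */
	demo_mod_bcount(&q, -2);	/* free: only the delta is recorded */
	assert(q.blk_res_used == 5 && q.bcount_delta == 3);
}
#endif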
/*
 * Given an array of dqtrx structures, lock all the dquots associated
 * and join them to the transaction, provided they have been modified.
 * We know that the highest number of dquots (of one type - usr OR grp),
 * involved in a transaction is 2 and that both usr and grp combined - 3.
 * So, we don't attempt to make this very generic.
 */
xfs_trans_dqlockedjoin(
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
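/*
 * Illustrative sketch (not from the original source): the two-dquot case
 * above relies on xfs_dqlock2() taking the pair in a single well-defined
 * order so that two racing transactions cannot deadlock.  Below is a generic
 * illustration of that idea, assuming the common convention of ordering by
 * an id field; it does not claim to reproduce xfs_dqlock2()'s actual rule.
 * Names are hypothetical and the block is compiled out.
 */
#if 0
#include <pthread.h>

struct demo_dquot {
	unsigned int	id;
	pthread_mutex_t	lock;
};

/* Always lock the lower-id dquot first so concurrent callers agree on order. */
static void
demo_dqlock2(struct demo_dquot *a, struct demo_dquot *b)
{
	struct demo_dquot *first = (a->id <= b->id) ? a : b;
	struct demo_dquot *second = (first == a) ? b : a;

	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
}
#endif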
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
xfs_trans_apply_dquot_deltas(
	xfs_dqtrx_t	*qtrx, *qa;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	qa = tp->t_dqinfo->dqa_usrdquots;
	for (j = 0; j < 2; j++) {
		if (qa[0].qt_dquot == NULL) {
			qa = tp->t_dqinfo->dqa_grpdquots;
			continue;
		}
		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			ASSERT(XFS_DQ_IS_LOCKED(dqp));
			ASSERT(dqp->q_transp == tp);
			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;
			/*
			 * The issue here is - sometimes we don't make a blkquota
			 * reservation intentionally to be fair to users
			 * (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks and
			 * non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
			if (totalbdelta < 0)
				ASSERT(be64_to_cpu(d->d_bcount) >=
				       -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(be64_to_cpu(d->d_rtbcount) >=
				       -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(be64_to_cpu(d->d_icount) >=
				       -qtrx->qt_icount_delta);
			if (totalbdelta)
				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

			if (qtrx->qt_icount_delta)
				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

			if (totalrtbdelta)
				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);
			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			xfs_qm_adjust_dqlimits(tp->t_mountp, d);
			xfs_qm_adjust_dqtimers(tp->t_mountp, d);
			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
					if (qtrx->qt_blk_res >
					    qtrx->qt_blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 qtrx->qt_blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either inside
				 * a transaction or outside one (in a delayed
				 * allocation). Also, this isn't always a
				 * negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
						(xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
			/*
			 * Adjust the RT reservation.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res -
							 qtrx->qt_rtblk_res_used);
					else
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res_used -
							 qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
						(xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}
			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
						(xfs_qcnt_t)qtrx->qt_icount_delta;
			}
			ASSERT(dqp->q_res_bcount >=
				be64_to_cpu(dqp->q_core.d_bcount));
			ASSERT(dqp->q_res_icount >=
				be64_to_cpu(dqp->q_core.d_icount));
			ASSERT(dqp->q_res_rtbcount >=
				be64_to_cpu(dqp->q_core.d_rtbcount));
		/*
		 * Do the group quotas next
		 */
		qa = tp->t_dqinfo->dqa_grpdquots;
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
xfs_trans_unreserve_and_mod_dquots(
	xfs_dqtrx_t	*qtrx, *qa;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	qa = tp->t_dqinfo->dqa_usrdquots;

	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			if (qtrx->qt_blk_res) {
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}
			if (qtrx->qt_rtblk_res) {
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
		qa = tp->t_dqinfo->dqa_grpdquots;
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	/* no warnings for project quotas - we just return ENOSPC later */
	if (dqp->dq_flags & XFS_DQ_PROJ)
		return;
	quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA,
			   be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev,
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
	xfs_qcnt_t	hardlimit;
	xfs_qcnt_t	softlimit;
	xfs_qwarncnt_t	warns;
	xfs_qwarncnt_t	warnlimit;
	xfs_qcnt_t	*resbcountp;
	xfs_quotainfo_t	*q = mp->m_quotainfo;
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = q->qi_bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = q->qi_rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
		resbcountp = &dqp->q_res_rtbcount;
	}
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
	      (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
		/*
		 * dquot is locked already. See if we'd go over the
		 * hardlimit or exceed the timelimit if we allocate
		 * nblks.
		 */
		if (hardlimit > 0ULL &&
		    hardlimit < nblks + *resbcountp) {
			xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
			goto error_return;
		}
		if (softlimit > 0ULL &&
		    softlimit < nblks + *resbcountp) {
			if ((timer != 0 && get_seconds() > timer) ||
			    (warns != 0 && warns >= warnlimit)) {
				xfs_quota_warn(mp, dqp,
					       QUOTA_NL_BSOFTLONGWARN);
				goto error_return;
			}

			xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
		}
		count = be64_to_cpu(dqp->q_core.d_icount);
		timer = be32_to_cpu(dqp->q_core.d_itimer);
		warns = be16_to_cpu(dqp->q_core.d_iwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
		hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_ihardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
		if (!softlimit)
			softlimit = q->qi_isoftlimit;
		if (hardlimit > 0ULL &&
		    hardlimit < ninos + count) {
			xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
			goto error_return;
		}
		if (softlimit > 0ULL &&
		    softlimit < ninos + count) {
			if ((timer != 0 && get_seconds() > timer) ||
			    (warns != 0 && warns >= warnlimit)) {
				xfs_quota_warn(mp, dqp,
					       QUOTA_NL_ISOFTLONGWARN);
				goto error_return;
			}

			xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
		}
	}
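/*
 * Illustrative sketch (not from the original source): a standalone model of
 * the enforcement decision made above, under the simplifying assumptions
 * that a zero limit means "no limit" and that the current time is passed in
 * rather than read via get_seconds().  A request fails outright if it would
 * cross the hard limit, and fails once the soft limit has been exceeded for
 * longer than the grace period or too many warnings have been issued.
 * Names are hypothetical; the block is compiled out.
 */
#if 0
#include <stdbool.h>

static bool
demo_quota_allowed(unsigned long long request, unsigned long long in_use,
		   unsigned long long softlimit, unsigned long long hardlimit,
		   unsigned long timer, unsigned long now,
		   unsigned int warns, unsigned int warnlimit)
{
	unsigned long long total = in_use + request;

	if (hardlimit && total > hardlimit)
		return false;			/* would exceed the hard limit */
	if (softlimit && total > softlimit) {
		if (timer && now > timer)
			return false;		/* grace period has expired */
		if (warns && warns >= warnlimit)
			return false;		/* too many warnings already */
		/* over the soft limit but still within grace: allowed, warn only */
	}
	return true;
}
#endif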
	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	dqp->q_res_icount += (xfs_qcnt_t)ninos;
	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		xfs_trans_mod_dquot(tp, dqp,
				    flags & XFS_QMOPT_RESBLK_MASK,
				    nblks);
		xfs_trans_mod_dquot(tp, dqp,
				    XFS_TRANS_DQ_RES_INOS,
				    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));
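/*
 * Illustrative sketch (not from the original source): a tiny worked example
 * of the invariant asserted above, q_res_bcount = q_core.d_bcount plus the
 * outstanding reservation.  The numbers are made up purely for illustration;
 * the block is compiled out.
 */
#if 0
#include <assert.h>

static void
demo_reservation_invariant(void)
{
	unsigned long long d_bcount = 100;	/* blocks actually owned on disk */
	unsigned long long resv = 20;		/* blocks reserved but not yet used */
	unsigned long long q_res_bcount = d_bcount + resv;

	/* the in-core reserved count can never drop below the on-disk count */
	assert(q_res_bcount >= d_bcount);
}
#endif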
	if (flags & XFS_QMOPT_ENOSPC)
/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against both the usr and
 * grp/prj quotas is important, because this follows a both-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
xfs_trans_reserve_quota_bydquots(
	int		resvd = 0, error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
					(flags & ~XFS_QMOPT_ENOSPC));
		if (error)
			return error;
		resvd = 1;
	}
	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error) {
			/*
			 * can't do it, so backout previous reservation
			 */
			if (resvd) {
				flags |= XFS_QMOPT_FORCE_RES;
				xfs_trans_dqresv(tp, mp, udqp,
						 -nblks, -ninos, flags);
			}
			return error;
		}
	}
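/*
 * Illustrative sketch (not from the original source): the rollback above
 * follows the usual both-or-nothing pattern, shown here in userspace form
 * with hypothetical reserve helpers passed in by the caller.  If the second
 * reservation fails, the first one is undone so a caller never sees a
 * partial reservation.  The block is compiled out.
 */
#if 0
static int
demo_reserve_both(long nblks,
		  int (*resv_usr)(long), int (*resv_grp)(long))
{
	int error;

	error = resv_usr(nblks);
	if (error)
		return error;

	error = resv_grp(nblks);
	if (error) {
		(void) resv_usr(-nblks);	/* back out the user reservation */
		return error;
	}
	return 0;
}
#endif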
	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;
/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,

	struct xfs_mount	*mp = ip->i_mount;
	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;
	if (XFS_IS_PQUOTA_ON(mp))
		flags |= XFS_QMOPT_ENOSPC;
	ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
	       XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
	       XFS_TRANS_DQ_RES_BLKS);
	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						nblks, ninos, flags);
/*
 * This routine is called to allocate a quotaoff log item.
 */
xfs_trans_get_qoff_item(
	xfs_qoff_logitem_t	*startqoff,

	xfs_qoff_logitem_t	*q;
	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &q->qql_item);
/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
xfs_trans_log_quotaoff_item(
	xfs_qoff_logitem_t	*qlp)

	tp->t_flags |= XFS_TRANS_DIRTY;
	qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
xfs_trans_alloc_dqinfo(

	tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
xfs_trans_free_dqinfo(

	kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);