/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	xfs_dq_logitem_t	*lp = &dqp->q_logitem;

	ASSERT(!XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)(lp));

	/*
	 * Initialize q_transp so we can later determine if this dquot is
	 * associated with this transaction.
	 */
	dqp->q_transp = tp;
}
/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	xfs_log_item_desc_t	*lidp;

	ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)(&dqp->q_logitem));
	ASSERT(lidp != NULL);

	tp->t_flags |= XFS_TRANS_DIRTY;
	lidp->lid_flags |= XFS_LID_DIRTY;
}
/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanking new transaction.
 */
void
xfs_trans_dup_dqinfo(
	xfs_trans_t	*otp,
	xfs_trans_t	*ntp)
{
	xfs_dqtrx_t	*oq, *nq;
	int		i, j;
	xfs_dqtrx_t	*oqa, *nqa;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);
	oqa = otp->t_dqinfo->dqa_usrdquots;
	nqa = ntp->t_dqinfo->dqa_usrdquots;

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;
			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
			oq->qt_blk_res = oq->qt_blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
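
			/*
			 * Illustrative numbers only: if the old transaction
			 * reserved 100 blocks (qt_blk_res) and used 40 of
			 * them (qt_blk_res_used), the new transaction
			 * inherits a 60 block reservation and the old one
			 * keeps just the 40 it consumed.
			 */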
		}

		oqa = otp->t_dqinfo->dqa_grpdquots;
		nqa = ntp->t_dqinfo->dqa_grpdquots;
	}
}
/*
 * Wrap around mod_dquot to account for both user and group quotas.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	long		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    ip->i_ino == mp->m_sb.sb_uquotino ||
	    ip->i_ino == mp->m_sb.sb_gquotino)
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
}
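
/*
 * For illustration only: block allocation paths typically feed usage
 * changes through this wrapper, e.g.
 *
 *	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, nblks);
 *
 * which applies the same delta to the inode's user dquot and to its
 * group (or project) dquot, when the respective quota type is on.
 */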
STATIC xfs_dqtrx_t *
xfs_trans_get_dqtrx(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	int		i;
	xfs_dqtrx_t	*qa;

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		qa = XFS_QM_DQP_TO_DQACCT(tp, dqp);

		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp) {
			return &qa[i];
		}
	}

	return NULL;
}
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp,
	uint		field,
	long		delta)
{
	xfs_dqtrx_t	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;
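
	/*
	 * Note: the qt_*_res fields below accumulate what this transaction
	 * has *reserved* against the dquot, while the qt_*_delta fields
	 * accumulate changes in *actual* usage; the two are reconciled
	 * later in xfs_trans_apply_dquot_deltas().
	 */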
	switch (field) {
		/*
		 * regular disk blk reservation
		 */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += (ulong)delta;
		break;

	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += (ulong)delta;
		break;

	case XFS_TRANS_DQ_BCOUNT:
		if (qtrx->qt_blk_res && delta > 0) {
			qtrx->qt_blk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
		}
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += (ulong)delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += (ulong)delta;
		break;

	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}

	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}
/*
 * Given an array of dqtrx structures, lock all the dquots associated with
 * them and join them to the transaction, provided they have been modified.
 * We know that the highest number of dquots of one type (usr OR grp)
 * involved in a transaction is 2, and both usr and grp combined is 3.
 * So, we don't attempt to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	xfs_trans_t	*tp,
	xfs_dqtrx_t	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}
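
/*
 * (Presumably xfs_dqlock2() takes the two dquot locks in a consistent
 * order, so that concurrent transactions touching the same user/group
 * dquot pair cannot deadlock against each other.)
 */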
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * The dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	xfs_trans_t	*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	xfs_disk_dquot_t	*d;
	long			totalbdelta;
	long			totalrtbdelta;
	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	qa = tp->t_dqinfo->dqa_usrdquots;
	for (j = 0; j < 2; j++) {
		if (qa[0].qt_dquot == NULL) {
			qa = tp->t_dqinfo->dqa_grpdquots;
			continue;
		}
		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));
			ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;

			/*
			 * The issue here is that sometimes we don't make a
			 * blkquota reservation intentionally, to be fair to
			 * users (when the amount is small). On the other
			 * hand, delayed allocs do make reservations, but
			 * that's outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks
			 * and non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
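
			/*
			 * Illustrative numbers only: with a qt_bcount_delta
			 * of +10 and a qt_delbcnt_delta of -4, the dquot's
			 * d_bcount below is adjusted by a net +6 blocks.
			 */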
			if (totalbdelta < 0)
				ASSERT(be64_to_cpu(d->d_bcount) >=
				       (xfs_qcnt_t) -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(be64_to_cpu(d->d_rtbcount) >=
				       (xfs_qcnt_t) -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(be64_to_cpu(d->d_icount) >=
				       (xfs_qcnt_t) -qtrx->qt_icount_delta);
			if (totalbdelta)
				be64_add_cpu(&d->d_bcount,
					     (xfs_qcnt_t)totalbdelta);

			if (qtrx->qt_icount_delta)
				be64_add_cpu(&d->d_icount,
					     (xfs_qcnt_t)qtrx->qt_icount_delta);

			if (totalrtbdelta)
				be64_add_cpu(&d->d_rtbcount,
					     (xfs_qcnt_t)totalrtbdelta);
			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			xfs_qm_adjust_dqlimits(tp->t_mountp, d);
			xfs_qm_adjust_dqtimers(tp->t_mountp, d);

			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
					if (qtrx->qt_blk_res >
					    qtrx->qt_blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 qtrx->qt_blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either inside
				 * a transaction or outside one (in a delayed
				 * allocation). Also, this isn't always a
				 * negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
					      (xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
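
			/*
			 * Illustrative numbers only: if this transaction
			 * reserved 100 blocks against the dquot but only
			 * used 40, the surplus 60 is subtracted from
			 * q_res_bcount here; the 40 that were used stay
			 * accounted via d_bcount above.
			 */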
			/*
			 * Adjust the RT reservation.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res -
							 qtrx->qt_rtblk_res_used);
					else
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res_used -
							 qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
					    (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}
			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
					    (xfs_qcnt_t)qtrx->qt_icount_delta;
			}
			ASSERT(dqp->q_res_bcount >=
				be64_to_cpu(dqp->q_core.d_bcount));
			ASSERT(dqp->q_res_icount >=
				be64_to_cpu(dqp->q_core.d_icount));
			ASSERT(dqp->q_res_rtbcount >=
				be64_to_cpu(dqp->q_core.d_rtbcount));
		}

		/*
		 * Do the group quotas next
		 */
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	xfs_trans_t	*tp)
{
	int		i, j;
	xfs_dquot_t	*dqp;
	xfs_dqtrx_t	*qtrx, *qa;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	qa = tp->t_dqinfo->dqa_usrdquots;
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			if (qtrx->qt_blk_res) {
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}
			if (qtrx->qt_rtblk_res) {
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
		}

		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
STATIC int
xfs_quota_error(uint flags)
{
	if (flags & XFS_QMOPT_ENOSPC)
		return ENOSPC;
	return EDQUOT;
}
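
/*
 * Callers that reserve on behalf of project quota pass XFS_QMOPT_ENOSPC so
 * that hitting the limit looks like a full filesystem (ENOSPC) rather than
 * an exceeded user quota (EDQUOT); see the flag descriptions further below.
 */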
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	int		error;
	xfs_qcnt_t	hardlimit;
	xfs_qcnt_t	softlimit;
	time_t		timer;
	xfs_qwarncnt_t	warns;
	xfs_qwarncnt_t	warnlimit;
	xfs_qcnt_t	count;
	xfs_qcnt_t	*resbcountp;
	xfs_quotainfo_t	*q = mp->m_quotainfo;
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = q->qi_bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = q->qi_rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
		resbcountp = &dqp->q_res_rtbcount;
	}
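
	/*
	 * A zero hard/soft limit in the dquot itself means no per-dquot
	 * limit has been set, in which case the mount-wide defaults from
	 * m_quotainfo (qi_bhardlimit and friends) are used instead.
	 */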
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
	      (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
#ifdef QUOTADEBUG
		cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
			  " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
#endif
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			if (hardlimit > 0ULL &&
			    (hardlimit <= nblks + *resbcountp)) {
				error = xfs_quota_error(flags);
				goto error_return;
			}

			if (softlimit > 0ULL &&
			    (softlimit <= nblks + *resbcountp)) {
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					error = xfs_quota_error(flags);
					goto error_return;
				}
			}
		}
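
		/*
		 * In short: crossing the hard limit always fails the
		 * reservation, while crossing the soft limit only fails
		 * once the grace timer has expired or the warning count
		 * has reached its limit.
		 */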
		if (ninos > 0) {
			count = be64_to_cpu(dqp->q_core.d_icount);
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = q->qi_ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = q->qi_isoftlimit;

			if (hardlimit > 0ULL && count >= hardlimit) {
				error = xfs_quota_error(flags);
				goto error_return;
			} else if (softlimit > 0ULL && count >= softlimit) {
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					error = xfs_quota_error(flags);
					goto error_return;
				}
			}
		}
	}
	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;
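
	/*
	 * Illustrative numbers only: a dquot with d_bcount = 50 and an
	 * outstanding reservation of 20 has q_res_bcount = 70; reserving
	 * another 10 blocks here simply bumps q_res_bcount to 80, while
	 * d_bcount is only updated later, when blocks are really used.
	 */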
	/*
	 * Note the reservation amount in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

	error = 0;
error_return:
	if (!(flags & XFS_QMOPT_DQLOCK))
		xfs_dqunlock(dqp);
	return error;
}
/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against both the usr and
 * grp/prj quotas is important, because this follows a both-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	int		resvd = 0, error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
					(flags & ~XFS_QMOPT_ENOSPC));
		if (error)
			return error;
		resvd = 1;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error) {
			/*
			 * can't do it, so backout previous reservation
			 */
			if (resvd) {
				flags |= XFS_QMOPT_FORCE_RES;
				xfs_trans_dqresv(tp, mp, udqp,
						 -nblks, -ninos, flags);
			}
			return error;
		}
	}
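
	/*
	 * Setting XFS_QMOPT_FORCE_RES on the backout pass means the
	 * negative "reservation" against the user dquot is applied
	 * unconditionally, so undoing the first reservation can't fail.
	 */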
	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;
}
/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	long			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;
	if (XFS_IS_PQUOTA_ON(mp))
		flags |= XFS_QMOPT_ENOSPC;
	ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
				XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
				XFS_TRANS_DQ_RES_BLKS);
	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						nblks, ninos, flags);
}
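
/*
 * For illustration only: a typical caller that is about to allocate data
 * blocks for an inode inside a transaction would do something like
 *
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, nblks, 0,
 *					      XFS_TRANS_DQ_RES_BLKS);
 *
 * and fail the operation with EDQUOT (or ENOSPC for project quota) if the
 * reservation is denied.
 */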
/*
 * This routine is called to allocate a quotaoff log item.
 */
xfs_qoff_logitem_t *
xfs_trans_get_qoff_item(
	xfs_trans_t		*tp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_qoff_logitem_t	*q;

	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)q);
	return q;
}
/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	xfs_trans_t		*tp,
	xfs_qoff_logitem_t	*qlp)
{
	xfs_log_item_desc_t	*lidp;

	lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp);
	ASSERT(lidp != NULL);

	tp->t_flags |= XFS_TRANS_DIRTY;
	lidp->lid_flags |= XFS_LID_DIRTY;
}
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
}
void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}