// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_error.h"
15 #include "xfs_trans.h"
16 #include "xfs_trans_priv.h"
17 #include "xfs_quota.h"
20 STATIC
void xfs_trans_alloc_dqinfo(xfs_trans_t
*);
23 * Add the locked dquot to the transaction.
24 * The dquot must be locked, and it cannot be associated with any
32 ASSERT(dqp
->q_transp
!= tp
);
33 ASSERT(XFS_DQ_IS_LOCKED(dqp
));
34 ASSERT(dqp
->q_logitem
.qli_dquot
== dqp
);
37 * Get a log_item_desc to point at the new item.
39 xfs_trans_add_item(tp
, &dqp
->q_logitem
.qli_item
);
42 * Initialize d_transp so we can later determine if this dquot is
43 * associated with this transaction.
50 * This is called to mark the dquot as needing
51 * to be logged when the transaction is committed. The dquot must
52 * already be associated with the given transaction.
53 * Note that it marks the entire transaction as dirty. In the ordinary
54 * case, this gets called via xfs_trans_commit, after the transaction
55 * is already dirty. However, there's nothing stop this from getting
56 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
64 ASSERT(dqp
->q_transp
== tp
);
65 ASSERT(XFS_DQ_IS_LOCKED(dqp
));
67 tp
->t_flags
|= XFS_TRANS_DIRTY
;
68 set_bit(XFS_LI_DIRTY
, &dqp
->q_logitem
.qli_item
.li_flags
);
72 * Carry forward whatever is left of the quota blk reservation to
73 * the spanky new transaction
82 xfs_dqtrx_t
*oqa
, *nqa
;
88 xfs_trans_alloc_dqinfo(ntp
);
91 * Because the quota blk reservation is carried forward,
92 * it is also necessary to carry forward the DQ_DIRTY flag.
94 if (otp
->t_flags
& XFS_TRANS_DQ_DIRTY
)
95 ntp
->t_flags
|= XFS_TRANS_DQ_DIRTY
;
97 for (j
= 0; j
< XFS_QM_TRANS_DQTYPES
; j
++) {
98 oqa
= otp
->t_dqinfo
->dqs
[j
];
99 nqa
= ntp
->t_dqinfo
->dqs
[j
];
100 for (i
= 0; i
< XFS_QM_TRANS_MAXDQS
; i
++) {
103 if (oqa
[i
].qt_dquot
== NULL
)
108 if (oq
->qt_blk_res
&& oq
->qt_bcount_delta
> 0)
109 blk_res_used
= oq
->qt_bcount_delta
;
111 nq
->qt_dquot
= oq
->qt_dquot
;
112 nq
->qt_bcount_delta
= nq
->qt_icount_delta
= 0;
113 nq
->qt_rtbcount_delta
= 0;
116 * Transfer whatever is left of the reservations.
118 nq
->qt_blk_res
= oq
->qt_blk_res
- blk_res_used
;
119 oq
->qt_blk_res
= blk_res_used
;
121 nq
->qt_rtblk_res
= oq
->qt_rtblk_res
-
122 oq
->qt_rtblk_res_used
;
123 oq
->qt_rtblk_res
= oq
->qt_rtblk_res_used
;
125 nq
->qt_ino_res
= oq
->qt_ino_res
- oq
->qt_ino_res_used
;
126 oq
->qt_ino_res
= oq
->qt_ino_res_used
;
133 * Wrap around mod_dquot to account for both user and group quotas.
136 xfs_trans_mod_dquot_byino(
142 xfs_mount_t
*mp
= tp
->t_mountp
;
144 if (!XFS_IS_QUOTA_RUNNING(mp
) ||
145 !XFS_IS_QUOTA_ON(mp
) ||
146 xfs_is_quota_inode(&mp
->m_sb
, ip
->i_ino
))
149 if (tp
->t_dqinfo
== NULL
)
150 xfs_trans_alloc_dqinfo(tp
);
152 if (XFS_IS_UQUOTA_ON(mp
) && ip
->i_udquot
)
153 (void) xfs_trans_mod_dquot(tp
, ip
->i_udquot
, field
, delta
);
154 if (XFS_IS_GQUOTA_ON(mp
) && ip
->i_gdquot
)
155 (void) xfs_trans_mod_dquot(tp
, ip
->i_gdquot
, field
, delta
);
156 if (XFS_IS_PQUOTA_ON(mp
) && ip
->i_pdquot
)
157 (void) xfs_trans_mod_dquot(tp
, ip
->i_pdquot
, field
, delta
);
160 STATIC
struct xfs_dqtrx
*
162 struct xfs_trans
*tp
,
163 struct xfs_dquot
*dqp
)
166 struct xfs_dqtrx
*qa
;
168 if (XFS_QM_ISUDQ(dqp
))
169 qa
= tp
->t_dqinfo
->dqs
[XFS_QM_TRANS_USR
];
170 else if (XFS_QM_ISGDQ(dqp
))
171 qa
= tp
->t_dqinfo
->dqs
[XFS_QM_TRANS_GRP
];
172 else if (XFS_QM_ISPDQ(dqp
))
173 qa
= tp
->t_dqinfo
->dqs
[XFS_QM_TRANS_PRJ
];
177 for (i
= 0; i
< XFS_QM_TRANS_MAXDQS
; i
++) {
178 if (qa
[i
].qt_dquot
== NULL
||
179 qa
[i
].qt_dquot
== dqp
)
187 * Make the changes in the transaction structure.
188 * The moral equivalent to xfs_trans_mod_sb().
189 * We don't touch any fields in the dquot, so we don't care
190 * if it's locked or not (most of the time it won't be).
202 ASSERT(XFS_IS_QUOTA_RUNNING(tp
->t_mountp
));
205 if (tp
->t_dqinfo
== NULL
)
206 xfs_trans_alloc_dqinfo(tp
);
208 * Find either the first free slot or the slot that belongs
211 qtrx
= xfs_trans_get_dqtrx(tp
, dqp
);
213 if (qtrx
->qt_dquot
== NULL
)
214 qtrx
->qt_dquot
= dqp
;
219 * regular disk blk reservation
221 case XFS_TRANS_DQ_RES_BLKS
:
222 qtrx
->qt_blk_res
+= (ulong
)delta
;
228 case XFS_TRANS_DQ_RES_INOS
:
229 qtrx
->qt_ino_res
+= (ulong
)delta
;
235 case XFS_TRANS_DQ_BCOUNT
:
236 qtrx
->qt_bcount_delta
+= delta
;
239 case XFS_TRANS_DQ_DELBCOUNT
:
240 qtrx
->qt_delbcnt_delta
+= delta
;
246 case XFS_TRANS_DQ_ICOUNT
:
247 if (qtrx
->qt_ino_res
&& delta
> 0) {
248 qtrx
->qt_ino_res_used
+= (ulong
)delta
;
249 ASSERT(qtrx
->qt_ino_res
>= qtrx
->qt_ino_res_used
);
251 qtrx
->qt_icount_delta
+= delta
;
257 case XFS_TRANS_DQ_RES_RTBLKS
:
258 qtrx
->qt_rtblk_res
+= (ulong
)delta
;
264 case XFS_TRANS_DQ_RTBCOUNT
:
265 if (qtrx
->qt_rtblk_res
&& delta
> 0) {
266 qtrx
->qt_rtblk_res_used
+= (ulong
)delta
;
267 ASSERT(qtrx
->qt_rtblk_res
>= qtrx
->qt_rtblk_res_used
);
269 qtrx
->qt_rtbcount_delta
+= delta
;
272 case XFS_TRANS_DQ_DELRTBCOUNT
:
273 qtrx
->qt_delrtb_delta
+= delta
;
279 tp
->t_flags
|= XFS_TRANS_DQ_DIRTY
;
284 * Given an array of dqtrx structures, lock all the dquots associated and join
285 * them to the transaction, provided they have been modified. We know that the
286 * highest number of dquots of one type - usr, grp and prj - involved in a
287 * transaction is 3 so we don't need to make this very generic.
290 xfs_trans_dqlockedjoin(
294 ASSERT(q
[0].qt_dquot
!= NULL
);
295 if (q
[1].qt_dquot
== NULL
) {
296 xfs_dqlock(q
[0].qt_dquot
);
297 xfs_trans_dqjoin(tp
, q
[0].qt_dquot
);
299 ASSERT(XFS_QM_TRANS_MAXDQS
== 2);
300 xfs_dqlock2(q
[0].qt_dquot
, q
[1].qt_dquot
);
301 xfs_trans_dqjoin(tp
, q
[0].qt_dquot
);
302 xfs_trans_dqjoin(tp
, q
[1].qt_dquot
);
308 * Called by xfs_trans_commit() and similar in spirit to
309 * xfs_trans_apply_sb_deltas().
310 * Go thru all the dquots belonging to this transaction and modify the
311 * INCORE dquot to reflect the actual usages.
312 * Unreserve just the reservations done by this transaction.
313 * dquot is still left locked at exit.
316 xfs_trans_apply_dquot_deltas(
317 struct xfs_trans
*tp
)
320 struct xfs_dquot
*dqp
;
321 struct xfs_dqtrx
*qtrx
, *qa
;
322 struct xfs_disk_dquot
*d
;
326 if (!(tp
->t_flags
& XFS_TRANS_DQ_DIRTY
))
329 ASSERT(tp
->t_dqinfo
);
330 for (j
= 0; j
< XFS_QM_TRANS_DQTYPES
; j
++) {
331 qa
= tp
->t_dqinfo
->dqs
[j
];
332 if (qa
[0].qt_dquot
== NULL
)
336 * Lock all of the dquots and join them to the transaction.
338 xfs_trans_dqlockedjoin(tp
, qa
);
340 for (i
= 0; i
< XFS_QM_TRANS_MAXDQS
; i
++) {
343 * The array of dquots is filled
344 * sequentially, not sparsely.
346 if ((dqp
= qtrx
->qt_dquot
) == NULL
)
349 ASSERT(XFS_DQ_IS_LOCKED(dqp
));
350 ASSERT(dqp
->q_transp
== tp
);
353 * adjust the actual number of blocks used
358 * The issue here is - sometimes we don't make a blkquota
359 * reservation intentionally to be fair to users
360 * (when the amount is small). On the other hand,
361 * delayed allocs do make reservations, but that's
362 * outside of a transaction, so we have no
363 * idea how much was really reserved.
364 * So, here we've accumulated delayed allocation blks and
365 * non-delay blks. The assumption is that the
366 * delayed ones are always reserved (outside of a
367 * transaction), and the others may or may not have
368 * quota reservations.
370 totalbdelta
= qtrx
->qt_bcount_delta
+
371 qtrx
->qt_delbcnt_delta
;
372 totalrtbdelta
= qtrx
->qt_rtbcount_delta
+
373 qtrx
->qt_delrtb_delta
;
376 ASSERT(be64_to_cpu(d
->d_bcount
) >=
379 if (totalrtbdelta
< 0)
380 ASSERT(be64_to_cpu(d
->d_rtbcount
) >=
383 if (qtrx
->qt_icount_delta
< 0)
384 ASSERT(be64_to_cpu(d
->d_icount
) >=
385 -qtrx
->qt_icount_delta
);
388 be64_add_cpu(&d
->d_bcount
, (xfs_qcnt_t
)totalbdelta
);
390 if (qtrx
->qt_icount_delta
)
391 be64_add_cpu(&d
->d_icount
, (xfs_qcnt_t
)qtrx
->qt_icount_delta
);
394 be64_add_cpu(&d
->d_rtbcount
, (xfs_qcnt_t
)totalrtbdelta
);
397 * Get any default limits in use.
398 * Start/reset the timer(s) if needed.
401 xfs_qm_adjust_dqlimits(tp
->t_mountp
, dqp
);
402 xfs_qm_adjust_dqtimers(tp
->t_mountp
, d
);
405 dqp
->dq_flags
|= XFS_DQ_DIRTY
;
407 * add this to the list of items to get logged
409 xfs_trans_log_dquot(tp
, dqp
);
411 * Take off what's left of the original reservation.
412 * In case of delayed allocations, there's no
413 * reservation that a transaction structure knows of.
415 if (qtrx
->qt_blk_res
!= 0) {
416 ulong blk_res_used
= 0;
418 if (qtrx
->qt_bcount_delta
> 0)
419 blk_res_used
= qtrx
->qt_bcount_delta
;
421 if (qtrx
->qt_blk_res
!= blk_res_used
) {
422 if (qtrx
->qt_blk_res
> blk_res_used
)
423 dqp
->q_res_bcount
-= (xfs_qcnt_t
)
427 dqp
->q_res_bcount
-= (xfs_qcnt_t
)
433 * These blks were never reserved, either inside
434 * a transaction or outside one (in a delayed
435 * allocation). Also, this isn't always a
436 * negative number since we sometimes
437 * deliberately skip quota reservations.
439 if (qtrx
->qt_bcount_delta
) {
441 (xfs_qcnt_t
)qtrx
->qt_bcount_delta
;
445 * Adjust the RT reservation.
447 if (qtrx
->qt_rtblk_res
!= 0) {
448 if (qtrx
->qt_rtblk_res
!= qtrx
->qt_rtblk_res_used
) {
449 if (qtrx
->qt_rtblk_res
>
450 qtrx
->qt_rtblk_res_used
)
451 dqp
->q_res_rtbcount
-= (xfs_qcnt_t
)
452 (qtrx
->qt_rtblk_res
-
453 qtrx
->qt_rtblk_res_used
);
455 dqp
->q_res_rtbcount
-= (xfs_qcnt_t
)
456 (qtrx
->qt_rtblk_res_used
-
460 if (qtrx
->qt_rtbcount_delta
)
461 dqp
->q_res_rtbcount
+=
462 (xfs_qcnt_t
)qtrx
->qt_rtbcount_delta
;
466 * Adjust the inode reservation.
468 if (qtrx
->qt_ino_res
!= 0) {
469 ASSERT(qtrx
->qt_ino_res
>=
470 qtrx
->qt_ino_res_used
);
471 if (qtrx
->qt_ino_res
> qtrx
->qt_ino_res_used
)
472 dqp
->q_res_icount
-= (xfs_qcnt_t
)
474 qtrx
->qt_ino_res_used
);
476 if (qtrx
->qt_icount_delta
)
478 (xfs_qcnt_t
)qtrx
->qt_icount_delta
;
481 ASSERT(dqp
->q_res_bcount
>=
482 be64_to_cpu(dqp
->q_core
.d_bcount
));
483 ASSERT(dqp
->q_res_icount
>=
484 be64_to_cpu(dqp
->q_core
.d_icount
));
485 ASSERT(dqp
->q_res_rtbcount
>=
486 be64_to_cpu(dqp
->q_core
.d_rtbcount
));
492 * Release the reservations, and adjust the dquots accordingly.
493 * This is called only when the transaction is being aborted. If by
494 * any chance we have done dquot modifications incore (ie. deltas) already,
495 * we simply throw those away, since that's the expected behavior
496 * when a transaction is curtailed without a commit.
499 xfs_trans_unreserve_and_mod_dquots(
504 xfs_dqtrx_t
*qtrx
, *qa
;
507 if (!tp
->t_dqinfo
|| !(tp
->t_flags
& XFS_TRANS_DQ_DIRTY
))
510 for (j
= 0; j
< XFS_QM_TRANS_DQTYPES
; j
++) {
511 qa
= tp
->t_dqinfo
->dqs
[j
];
513 for (i
= 0; i
< XFS_QM_TRANS_MAXDQS
; i
++) {
516 * We assume that the array of dquots is filled
517 * sequentially, not sparsely.
519 if ((dqp
= qtrx
->qt_dquot
) == NULL
)
522 * Unreserve the original reservation. We don't care
523 * about the number of blocks used field, or deltas.
524 * Also we don't bother to zero the fields.
527 if (qtrx
->qt_blk_res
) {
531 (xfs_qcnt_t
)qtrx
->qt_blk_res
;
533 if (qtrx
->qt_ino_res
) {
539 (xfs_qcnt_t
)qtrx
->qt_ino_res
;
542 if (qtrx
->qt_rtblk_res
) {
547 dqp
->q_res_rtbcount
-=
548 (xfs_qcnt_t
)qtrx
->qt_rtblk_res
;
559 struct xfs_mount
*mp
,
560 struct xfs_dquot
*dqp
,
563 enum quota_type qtype
;
565 if (dqp
->dq_flags
& XFS_DQ_PROJ
)
567 else if (dqp
->dq_flags
& XFS_DQ_USER
)
572 quota_send_warning(make_kqid(&init_user_ns
, qtype
,
573 be32_to_cpu(dqp
->q_core
.d_id
)),
574 mp
->m_super
->s_dev
, type
);
578 * This reserves disk blocks and inodes against a dquot.
579 * Flags indicate if the dquot is to be locked here and also
580 * if the blk reservation is for RT or regular blocks.
581 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
592 xfs_qcnt_t hardlimit
;
593 xfs_qcnt_t softlimit
;
595 xfs_qwarncnt_t warns
;
596 xfs_qwarncnt_t warnlimit
;
597 xfs_qcnt_t total_count
;
598 xfs_qcnt_t
*resbcountp
;
599 xfs_quotainfo_t
*q
= mp
->m_quotainfo
;
600 struct xfs_def_quota
*defq
;
605 defq
= xfs_get_defquota(dqp
, q
);
607 if (flags
& XFS_TRANS_DQ_RES_BLKS
) {
608 hardlimit
= be64_to_cpu(dqp
->q_core
.d_blk_hardlimit
);
610 hardlimit
= defq
->bhardlimit
;
611 softlimit
= be64_to_cpu(dqp
->q_core
.d_blk_softlimit
);
613 softlimit
= defq
->bsoftlimit
;
614 timer
= be32_to_cpu(dqp
->q_core
.d_btimer
);
615 warns
= be16_to_cpu(dqp
->q_core
.d_bwarns
);
616 warnlimit
= dqp
->q_mount
->m_quotainfo
->qi_bwarnlimit
;
617 resbcountp
= &dqp
->q_res_bcount
;
619 ASSERT(flags
& XFS_TRANS_DQ_RES_RTBLKS
);
620 hardlimit
= be64_to_cpu(dqp
->q_core
.d_rtb_hardlimit
);
622 hardlimit
= defq
->rtbhardlimit
;
623 softlimit
= be64_to_cpu(dqp
->q_core
.d_rtb_softlimit
);
625 softlimit
= defq
->rtbsoftlimit
;
626 timer
= be32_to_cpu(dqp
->q_core
.d_rtbtimer
);
627 warns
= be16_to_cpu(dqp
->q_core
.d_rtbwarns
);
628 warnlimit
= dqp
->q_mount
->m_quotainfo
->qi_rtbwarnlimit
;
629 resbcountp
= &dqp
->q_res_rtbcount
;
632 if ((flags
& XFS_QMOPT_FORCE_RES
) == 0 &&
634 ((XFS_IS_UQUOTA_ENFORCED(dqp
->q_mount
) && XFS_QM_ISUDQ(dqp
)) ||
635 (XFS_IS_GQUOTA_ENFORCED(dqp
->q_mount
) && XFS_QM_ISGDQ(dqp
)) ||
636 (XFS_IS_PQUOTA_ENFORCED(dqp
->q_mount
) && XFS_QM_ISPDQ(dqp
)))) {
639 * dquot is locked already. See if we'd go over the
640 * hardlimit or exceed the timelimit if we allocate
643 total_count
= *resbcountp
+ nblks
;
644 if (hardlimit
&& total_count
> hardlimit
) {
645 xfs_quota_warn(mp
, dqp
, QUOTA_NL_BHARDWARN
);
648 if (softlimit
&& total_count
> softlimit
) {
649 if ((timer
!= 0 && get_seconds() > timer
) ||
650 (warns
!= 0 && warns
>= warnlimit
)) {
651 xfs_quota_warn(mp
, dqp
,
652 QUOTA_NL_BSOFTLONGWARN
);
656 xfs_quota_warn(mp
, dqp
, QUOTA_NL_BSOFTWARN
);
660 total_count
= be64_to_cpu(dqp
->q_core
.d_icount
) + ninos
;
661 timer
= be32_to_cpu(dqp
->q_core
.d_itimer
);
662 warns
= be16_to_cpu(dqp
->q_core
.d_iwarns
);
663 warnlimit
= dqp
->q_mount
->m_quotainfo
->qi_iwarnlimit
;
664 hardlimit
= be64_to_cpu(dqp
->q_core
.d_ino_hardlimit
);
666 hardlimit
= defq
->ihardlimit
;
667 softlimit
= be64_to_cpu(dqp
->q_core
.d_ino_softlimit
);
669 softlimit
= defq
->isoftlimit
;
671 if (hardlimit
&& total_count
> hardlimit
) {
672 xfs_quota_warn(mp
, dqp
, QUOTA_NL_IHARDWARN
);
675 if (softlimit
&& total_count
> softlimit
) {
676 if ((timer
!= 0 && get_seconds() > timer
) ||
677 (warns
!= 0 && warns
>= warnlimit
)) {
678 xfs_quota_warn(mp
, dqp
,
679 QUOTA_NL_ISOFTLONGWARN
);
682 xfs_quota_warn(mp
, dqp
, QUOTA_NL_ISOFTWARN
);
688 * Change the reservation, but not the actual usage.
689 * Note that q_res_bcount = q_core.d_bcount + resv
691 (*resbcountp
) += (xfs_qcnt_t
)nblks
;
693 dqp
->q_res_icount
+= (xfs_qcnt_t
)ninos
;
696 * note the reservation amt in the trans struct too,
697 * so that the transaction knows how much was reserved by
698 * it against this particular dquot.
699 * We don't do this when we are reserving for a delayed allocation,
700 * because we don't have the luxury of a transaction envelope then.
703 ASSERT(tp
->t_dqinfo
);
704 ASSERT(flags
& XFS_QMOPT_RESBLK_MASK
);
706 xfs_trans_mod_dquot(tp
, dqp
,
707 flags
& XFS_QMOPT_RESBLK_MASK
,
710 xfs_trans_mod_dquot(tp
, dqp
,
711 XFS_TRANS_DQ_RES_INOS
,
714 ASSERT(dqp
->q_res_bcount
>= be64_to_cpu(dqp
->q_core
.d_bcount
));
715 ASSERT(dqp
->q_res_rtbcount
>= be64_to_cpu(dqp
->q_core
.d_rtbcount
));
716 ASSERT(dqp
->q_res_icount
>= be64_to_cpu(dqp
->q_core
.d_icount
));
723 if (flags
& XFS_QMOPT_ENOSPC
)
730 * Given dquot(s), make disk block and/or inode reservations against them.
731 * The fact that this does the reservation against user, group and
732 * project quotas is important, because this follows a all-or-nothing
735 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
736 * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota.
737 * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
738 * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
739 * dquots are unlocked on return, if they were not locked by caller.
742 xfs_trans_reserve_quota_bydquots(
743 struct xfs_trans
*tp
,
744 struct xfs_mount
*mp
,
745 struct xfs_dquot
*udqp
,
746 struct xfs_dquot
*gdqp
,
747 struct xfs_dquot
*pdqp
,
754 if (!XFS_IS_QUOTA_RUNNING(mp
) || !XFS_IS_QUOTA_ON(mp
))
757 if (tp
&& tp
->t_dqinfo
== NULL
)
758 xfs_trans_alloc_dqinfo(tp
);
760 ASSERT(flags
& XFS_QMOPT_RESBLK_MASK
);
763 error
= xfs_trans_dqresv(tp
, mp
, udqp
, nblks
, ninos
,
764 (flags
& ~XFS_QMOPT_ENOSPC
));
770 error
= xfs_trans_dqresv(tp
, mp
, gdqp
, nblks
, ninos
, flags
);
776 error
= xfs_trans_dqresv(tp
, mp
, pdqp
, nblks
, ninos
, flags
);
782 * Didn't change anything critical, so, no need to log
787 flags
|= XFS_QMOPT_FORCE_RES
;
789 xfs_trans_dqresv(tp
, mp
, gdqp
, -nblks
, -ninos
, flags
);
791 flags
|= XFS_QMOPT_FORCE_RES
;
793 xfs_trans_dqresv(tp
, mp
, udqp
, -nblks
, -ninos
, flags
);
799 * Lock the dquot and change the reservation if we can.
800 * This doesn't change the actual usage, just the reservation.
801 * The inode sent in is locked.
804 xfs_trans_reserve_quota_nblks(
805 struct xfs_trans
*tp
,
806 struct xfs_inode
*ip
,
811 struct xfs_mount
*mp
= ip
->i_mount
;
813 if (!XFS_IS_QUOTA_RUNNING(mp
) || !XFS_IS_QUOTA_ON(mp
))
815 if (XFS_IS_PQUOTA_ON(mp
))
816 flags
|= XFS_QMOPT_ENOSPC
;
818 ASSERT(!xfs_is_quota_inode(&mp
->m_sb
, ip
->i_ino
));
820 ASSERT(xfs_isilocked(ip
, XFS_ILOCK_EXCL
));
821 ASSERT((flags
& ~(XFS_QMOPT_FORCE_RES
| XFS_QMOPT_ENOSPC
)) ==
822 XFS_TRANS_DQ_RES_RTBLKS
||
823 (flags
& ~(XFS_QMOPT_FORCE_RES
| XFS_QMOPT_ENOSPC
)) ==
824 XFS_TRANS_DQ_RES_BLKS
);
827 * Reserve nblks against these dquots, with trans as the mediator.
829 return xfs_trans_reserve_quota_bydquots(tp
, mp
,
830 ip
->i_udquot
, ip
->i_gdquot
,
832 nblks
, ninos
, flags
);
836 * This routine is called to allocate a quotaoff log item.
839 xfs_trans_get_qoff_item(
841 xfs_qoff_logitem_t
*startqoff
,
844 xfs_qoff_logitem_t
*q
;
848 q
= xfs_qm_qoff_logitem_init(tp
->t_mountp
, startqoff
, flags
);
852 * Get a log_item_desc to point at the new item.
854 xfs_trans_add_item(tp
, &q
->qql_item
);
860 * This is called to mark the quotaoff logitem as needing
861 * to be logged when the transaction is committed. The logitem must
862 * already be associated with the given transaction.
865 xfs_trans_log_quotaoff_item(
867 xfs_qoff_logitem_t
*qlp
)
869 tp
->t_flags
|= XFS_TRANS_DIRTY
;
870 set_bit(XFS_LI_DIRTY
, &qlp
->qql_item
.li_flags
);
874 xfs_trans_alloc_dqinfo(
877 tp
->t_dqinfo
= kmem_zone_zalloc(xfs_qm_dqtrxzone
, KM_SLEEP
);
881 xfs_trans_free_dqinfo(
886 kmem_zone_free(xfs_qm_dqtrxzone
, tp
->t_dqinfo
);