/*	$NetBSD: ulfs_quota2.c,v 1.15 2013/10/18 19:45:40 christos Exp $	*/
/*  from NetBSD: ufs_quota2.c,v 1.35 2012/09/27 07:47:56 bouyer Exp  */
/*  from NetBSD: ffs_quota2.c,v 1.4 2011/06/12 03:36:00 rmind Exp  */
/*
 * Copyright (c) 2010 Manuel Bouyer
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.15 2013/10/18 19:45:40 christos Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fstrans.h>
#include <sys/kauth.h>
#include <sys/wapbl.h>
#include <sys/quota.h>
#include <sys/quotactl.h>

#include <ufs/lfs/lfs_extern.h>

#include <ufs/lfs/ulfs_quota2.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_bswap.h>
#include <ufs/lfs/ulfs_extern.h>
#include <ufs/lfs/ulfs_quota.h>
/*
 * Data in the entries are protected by the associated struct dquot's
 * dq_interlock (this means we can't read or change a quota entry without
 * grabbing a dquot for it).
 * The header and lists (including pointers in the data entries, and q2e_uid)
 * are protected by the global dqlock.
 * The locking order is dq_interlock -> dqlock.
 */
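/*
 * A minimal sketch of how a caller is expected to honor that ordering,
 * modeled on getinoquota2()/quota2_q2ealloc() below; this fragment is
 * illustrative only and not compiled.
 */
#if 0
	mutex_enter(&dq->dq_interlock);		/* per-dquot lock first */
	mutex_enter(&lfs_dqlock);		/* then the global header/list lock */
	error = quota2_q2ealloc(ump, type, uid, dq);
	mutex_exit(&lfs_dqlock);
	mutex_exit(&dq->dq_interlock);
#endif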
static int quota2_bwrite(struct mount *, struct buf *);
static int getinoquota2(struct inode *, bool, bool, struct buf **,
    struct quota2_entry **);
static int getq2h(struct ulfsmount *, int, struct buf **,
    struct quota2_header **, int);
static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
    struct quota2_entry **, int);
static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
    uint64_t *, int, void *,
    int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
	uint64_t, void *));

static const char *limnames[] = INITQLNAMES;
quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
    struct quota2_entry *q2e)
	/* make sure we can index q2e_val[] by the fs-independent objtype */
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
	q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
	q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
/*
 * Convert internal representation to FS-independent representation.
 * (Note that while the two types are currently identical, the
 * internal representation is an on-disk struct and the FS-independent
 * representation is not, and they might diverge in the future.)
 */
q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
	qv->qv_softlimit = q2v->q2v_softlimit;
	qv->qv_hardlimit = q2v->q2v_hardlimit;
	qv->qv_usage = q2v->q2v_cur;
	qv->qv_expiretime = q2v->q2v_time;
	qv->qv_grace = q2v->q2v_grace;
/*
 * Convert a quota2entry and default-flag to the FS-independent
 * representation.
 */
q2e_to_quotaval(struct quota2_entry *q2e, int def,
    id_t *id, int objtype, struct quotaval *ret)
		*id = QUOTA_DEFAULTID;

	KASSERT(objtype >= 0 && objtype < N_QL);
	q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
quota2_bwrite(struct mount *mp, struct buf *bp)
	if (mp->mnt_flag & MNT_SYNCHRONOUS)
getq2h(struct ulfsmount *ump, int type,
    struct buf **bpp, struct quota2_header **q2hp, int flags)
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	struct quota2_header *q2h;

	KASSERT(mutex_owned(&lfs_dqlock));
	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
	    ump->um_cred[type], flags, &bp);
	if (bp->b_resid != 0)
		panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);

	q2h = (void *)bp->b_data;
	if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
	    q2h->q2h_type != type)
		panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
    struct buf **bpp, struct quota2_entry **q2ep, int flags)
	if (blkoffset & (sizeof(uint64_t) - 1)) {
		panic("dq2get: %s quota file corrupted",
		    lfs_quotatypes[type]);

	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
	    ump->um_cred[type], flags, &bp);
	if (bp->b_resid != 0) {
		panic("dq2get: %s quota file corrupted",
		    lfs_quotatypes[type]);

	*q2ep = (void *)((char *)bp->b_data + blkoffset);
/* walk a quota entry list, calling the callback for each entry */
#define Q2WL_ABORT 0x10000000

quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
    uint64_t *offp, int flags, void *a,
    int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	daddr_t off = ulfs_rw64(*offp, needswap);
	struct buf *bp, *obp = hbp;
	int ret = 0, ret2 = 0;
	struct quota2_entry *q2e;
	daddr_t lblkno, blkoff, olblkno = 0;

	KASSERT(mutex_owner(&lfs_dqlock));

		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
		blkoff = (off & ump->umq2_bmask);
			/* in the header block */
		} else if (lblkno == olblkno) {
			/* still in the same buf */
			ret = bread(ump->um_quotas[type], lblkno,
			    ump->umq2_bsize,
			    ump->um_cred[type], flags, &bp);
			if (bp->b_resid != 0) {
				panic("quota2_walk_list: %s quota file corrupted",
				    lfs_quotatypes[type]);

		q2e = (void *)((char *)(bp->b_data) + blkoff);
		ret = (*func)(ump, offp, q2e, off, a);
		if (off != ulfs_rw64(*offp, needswap)) {
			/* callback changed parent's pointer, redo */
			off = ulfs_rw64(*offp, needswap);
			if (bp != hbp && bp != obp)
			/* parent is now current */
			if (obp != bp && obp != hbp) {
				if (flags & B_MODIFY)
			offp = &(q2e->q2e_next);
			off = ulfs_rw64(*offp, needswap);

	if (flags & B_MODIFY)
	if (ret & Q2WL_ABORT)
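/*
 * Sketch of the callback contract assumed by quota2_walk_list() above
 * (illustrative only, inferred from the callbacks later in this file):
 * a callback returns 0 to keep walking, an error to stop with that error,
 * or Q2WL_ABORT to stop the walk early without reporting an error.  The
 * hypothetical callback below simply matches an entry by uid, in the
 * style of dq2get_callback().
 */
#if 0
static int
example_walk_callback(struct ulfsmount *ump, uint64_t *offp,
    struct quota2_entry *q2e, uint64_t off, void *v)
{
	uid_t *wanted = v;

	if (ulfs_rw32(q2e->q2e_uid, ULFS_MPNEEDSWAP(ump->um_lfs)) == *wanted)
		return Q2WL_ABORT;	/* found it, stop walking */
	return 0;			/* keep going */
}
#endif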
lfsquota2_umount(struct mount *mp, int flags)
	struct ulfsmount *ump = VFSTOULFS(mp);
	struct lfs *fs = ump->um_lfs;

	if ((fs->um_flags & ULFS_QUOTA2) == 0)

	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if (ump->um_quotas[i] != NULLVP) {
			error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
				printf("quota2_umount failed: close(%p) %d\n",
				    ump->um_quotas[i], error);
			ump->um_quotas[i] = NULLVP;
quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
	struct buf *hbp, *bp;
	struct quota2_header *q2h;
	struct quota2_entry *q2e;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	KASSERT(mutex_owned(&dq->dq_interlock));
	KASSERT(mutex_owned(&lfs_dqlock));
	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);

	offset = ulfs_rw64(q2h->q2h_free, needswap);
		struct vnode *vp = ump->um_quotas[type];
		struct inode *ip = VTOI(vp);
		uint64_t size = ip->i_size;
		/* need to allocate a new disk block */
		error = lfs_balloc(vp, size, ump->umq2_bsize,
		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);

		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
		ip->i_size += ump->umq2_bsize;
		DIP_ASSIGN(ip, size, ip->i_size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		uvm_vnp_setsize(vp, ip->i_size);
		lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,

		error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
		if (error || error2) {

		offset = ulfs_rw64(q2h->q2h_free, needswap);
		KASSERT(offset != 0);

	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
	dq->dq2_blkoff = (offset & ump->umq2_bmask);
	if (dq->dq2_lblkno == 0) {
		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
		error = getq2e(ump, type, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);

	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	/* remove from free list */
	q2h->q2h_free = q2e->q2e_next;

	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
	q2e->q2e_uid = ulfs_rw32(uid, needswap);
	/* insert in hash list */
	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
	q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
    struct quota2_entry **q2ep)
	struct ulfsmount *ump = ip->i_ump;
	u_int32_t ino_ids[ULFS_MAXQUOTAS];

	error = lfs_getinoquota(ip);

	ino_ids[ULFS_USRQUOTA] = ip->i_uid;
	ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
	/* first get the interlock for all dquot */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		mutex_enter(&dq->dq_interlock);
	/* now get the corresponding quota entry */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if (__predict_false(ump->um_quotas[i] == NULL)) {
			/*
			 * quotas have been turned off. This can happen
			 */
			mutex_exit(&dq->dq_interlock);
			lfs_dqrele(NULLVP, dq);
			ip->i_dquot[i] = NULL;

		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
			/* need to alloc a new on-disk quot */
			mutex_enter(&lfs_dqlock);
			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
			mutex_exit(&lfs_dqlock);

		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
		error = getq2e(ump, i, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
		    modify ? B_MODIFY : 0);
__inline static int __unused
lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
	return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
	    q2v->q2v_hardlimit, q2v->q2v_time, now);
quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
	struct buf *bp[ULFS_MAXQUOTAS];
	struct quota2_entry *q2e[ULFS_MAXQUOTAS];
	struct quota2_val *q2vp;
	struct ulfsmount *ump = ip->i_ump;
	struct lfs *fs = ip->i_lfs;
	struct mount *mp = ump->um_mountp;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)

	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			mutex_exit(&dq->dq_interlock);

		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			if (q2e[i] == NULL) {
				mutex_exit(&dq->dq_interlock);

			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
			if (ncurblks < -change)
			q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
			quota2_bwrite(mp, bp[i]);
			mutex_exit(&dq->dq_interlock);

	/* see if the allocation is allowed */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		struct quota2_val q2v;

		KASSERT(q2e[i] != NULL);
		lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
		ql_stat = lfsquota2_check_limit(&q2v, change, time_second);

		if ((flags & FORCE) == 0 &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
		    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
			/* enforce this limit */
			switch(QL_STATUS(ql_stat)) {
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
			case QL_S_DENY_GRACE:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
			case QL_S_ALLOW_SOFT:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: warning, %s %s "
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);

		/*
		 * always do this; we don't know if the allocation will
		 * succeed or not in the end. If we don't do the allocation
		 * q2v_time will be ignored anyway.
		 */
		if (ql_stat & QL_F_CROSS) {
			q2v.q2v_time = time_second + q2v.q2v_grace;
			lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],

	/* now do the allocation if allowed */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		KASSERT(q2e[i] != NULL);

		q2vp = &q2e[i]->q2e_val[vtype];
		ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
		q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
		quota2_bwrite(mp, bp[i]);

		mutex_exit(&dq->dq_interlock);
lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
	return quota2_check(ip, QL_BLOCK, change, cred, flags);

lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
	return quota2_check(ip, QL_FILE, change, cred, flags);
lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	/* make sure we can index by the fs-independent idtype */
	CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
	CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);

	if (ump->um_quotas[key->qk_idtype] == NULLVP)

	if (key->qk_id == QUOTA_DEFAULTID) {
		mutex_enter(&lfs_dqlock);
		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
			mutex_exit(&lfs_dqlock);
		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
		lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
		mutex_exit(&lfs_dqlock);
		quota2_bwrite(ump->um_mountp, bp);

	error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* need to alloc a new on-disk quot */
		mutex_enter(&lfs_dqlock);
		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
		mutex_exit(&lfs_dqlock);

	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);

	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
	lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
	quota2_bwrite(ump->um_mountp, bp);

	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
struct dq2clear_callback {
	struct quota2_header *q2h;

dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
	struct dq2clear_callback *c = v;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
		KASSERT(mutex_owned(&c->dq->dq_interlock));
		c->dq->dq2_lblkno = 0;
		c->dq->dq2_blkoff = 0;

		/* remove from hash list */
		*offp = q2e->q2e_next;
		/* add to free list */
		q2e->q2e_next = c->q2h->q2h_free;
		c->q2h->q2h_free = myoff;
lfsquota2_handle_cmd_delete(struct ulfsmount *ump, const struct quotakey *qk)
	int error, i, canfree;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *hbp, *bp;
	struct dq2clear_callback c;

	idtype = qk->qk_idtype;
	objtype = qk->qk_objtype;

	if (ump->um_quotas[idtype] == NULLVP)
	if (id == QUOTA_DEFAULTID)

	/* get the default entry before locking the entry's buffer */
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
		mutex_exit(&lfs_dqlock);
	/* we'll copy to another disk entry, so no need to swap */
	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
	mutex_exit(&lfs_dqlock);

	error = lfs_dqget(NULLVP, id, ump, idtype, &dq);

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* already clear, nothing to do */

	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, B_MODIFY);

	/* make sure we can index by the objtype passed in */
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	/* clear the requested objtype by copying from the default entry */
	q2ep->q2e_val[objtype].q2v_softlimit =
	    q2e.q2e_val[objtype].q2v_softlimit;
	q2ep->q2e_val[objtype].q2v_hardlimit =
	    q2e.q2e_val[objtype].q2v_hardlimit;
	q2ep->q2e_val[objtype].q2v_grace =
	    q2e.q2e_val[objtype].q2v_grace;
	q2ep->q2e_val[objtype].q2v_time = 0;

	/* if this entry now contains no information, we can free it */
	for (i = 0; i < N_QL; i++) {
		if (q2ep->q2e_val[i].q2v_cur != 0 ||
		    (q2ep->q2e_val[i].q2v_softlimit !=
		     q2e.q2e_val[i].q2v_softlimit) ||
		    (q2ep->q2e_val[i].q2v_hardlimit !=
		     q2e.q2e_val[i].q2v_hardlimit) ||
		    (q2ep->q2e_val[i].q2v_grace !=
		     q2e.q2e_val[i].q2v_grace)) {
		/* note: do not need to check q2v_time */

		quota2_bwrite(ump->um_mountp, bp);

	/* we can free it. release bp so we can walk the list */
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);

	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);

	error = quota2_walk_list(ump, hbp, idtype,
	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,

	mutex_exit(&lfs_dqlock);

	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
    struct quota2_entry *ret)
	struct quota2_entry *q2ep;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);

	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);

	lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);

	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
    struct quotaval *ret)
	struct quota2_entry *q2ep, q2e;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);

	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);

	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);

	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);

	q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
	KASSERT(id2 == qk->qk_id);
lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
	struct quota2_header *q2h;
	struct quota2_entry q2e;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	/*
	 * Make sure the FS-independent codes match the internal ones,
	 * so we can use the passed-in objtype without having to
	 * convert it explicitly to QL_BLOCK/QL_FILE.
	 */
	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);

	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {

	if (ump->um_quotas[qk->qk_idtype] == NULLVP)

	if (qk->qk_id == QUOTA_DEFAULTID) {
		mutex_enter(&lfs_dqlock);
		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
			mutex_exit(&lfs_dqlock);
		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		mutex_exit(&lfs_dqlock);

		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,

		error = quota2_fetch_quotaval(ump, qk, qv);
/*
 * Cursor structure we used.
 *
 * This will get stored in userland between calls so we must not assume
 * it isn't arbitrarily corrupted.
 */
struct ulfsq2_cursor {
	uint32_t q2c_magic;	/* magic number */
	int q2c_hashsize;	/* size of hash table at last go */

	int q2c_users_done;	/* true if we've returned all user data */
	int q2c_groups_done;	/* true if we've returned all group data */
	int q2c_defaults_done;	/* true if we've returned the default values */
	int q2c_hashpos;	/* slot to start at in hash table */
	int q2c_uidpos;		/* number of ids we've handled */
	int q2c_blocks_done;	/* true if we've returned the blocks value */
/*
 * State of a single cursorget call, or at least the part of it that
 * needs to be passed around.
 */
struct q2cursor_state {
	/* data return pointers */
	struct quotakey *keys;
	struct quotaval *vals;

	/* key/value counters */
	unsigned numkeys;	/* number of keys assigned */

	/* ID to key/value conversion state */
	int skipfirst;		/* if true skip first key/value */
	int skiplast;		/* if true skip last key/value */

	unsigned maxids;	/* maximum number of IDs to handle */
	unsigned numids;	/* number of IDs handled */
/*
 * Additional structure for getids callback.
 */
struct q2cursor_getids {
	struct q2cursor_state *state;
	unsigned skip;		/* number of ids to skip over */
	unsigned new_skip;	/* number of ids to skip over next time */
	unsigned skipped;	/* number skipped so far */
	int stopped;		/* true if we stopped quota_walk_list early */
/*
 * Cursor-related functions
 */

#define Q2C_MAGIC (0xbeebe111)

/* extract cursor from caller form */
#define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])

/*
 * Check that a cursor we're handed is something like valid. If
 * someone munges it and it still passes these checks, they'll get
 * partial or odd results back but won't break anything.
 */
q2cursor_check(struct ulfsq2_cursor *cursor)
	if (cursor->q2c_magic != Q2C_MAGIC) {
	if (cursor->q2c_hashsize < 0) {

	if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
	if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
	if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
	if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
	if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
/*
 * Set up the q2cursor state.
 */
q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
    struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
	state->maxkeyvals = maxkeyvals;

	/*
	 * For each ID there are two quotavals to return. If the
	 * maximum number of entries to return is odd, we might want
	 * to skip the first quotaval of the first ID, or the last
	 * quotaval of the last ID, but not both. So the number of IDs
	 * we want is (up to) half the number of return slots we have,
	 * rounded up.
	 */
	state->maxids = (state->maxkeyvals + 1) / 2;
	if (state->maxkeyvals % 2) {
			state->skipfirst = 1;
			state->skiplast = 0;
			state->skipfirst = 0;
			state->skiplast = 1;
		state->skipfirst = 0;
		state->skiplast = 0;
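/*
 * Worked example of the arithmetic above (illustrative): with
 * maxkeyvals == 5 there are (5 + 1) / 2 == 3 IDs to fetch.  If the
 * previous call already returned the BLOCKS half of the first ID
 * (blocks_done set), only its FILES half remains, so skipfirst is set;
 * otherwise the third ID only gets its BLOCKS half out this call and
 * skiplast is set instead.  With an even maxkeyvals neither is skipped.
 */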
/*
 * Choose which idtype we're going to work on. If doing a full
 * iteration, we do users first, then groups, but either might be
 * disabled or marked to skip via cursorsetidtype(), so don't make
 * silly assumptions.
 */
q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
	if (cursor->q2c_users_done == 0) {
		*idtype_ret = QUOTA_IDTYPE_USER;
	} else if (cursor->q2c_groups_done == 0) {
		*idtype_ret = QUOTA_IDTYPE_GROUP;
/*
 * Add an ID to the current state. Sets up either one or two keys to
 * refer to it, depending on whether it's first/last and the setting
 * of skipfirst. (skiplast does not need to be explicitly tested)
 */
q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
	KASSERT(state->numids < state->maxids);
	KASSERT(state->numkeys < state->maxkeyvals);

	if (!state->skipfirst || state->numkeys > 0) {
		state->keys[state->numkeys].qk_idtype = idtype;
		state->keys[state->numkeys].qk_id = id;
		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;

	if (state->numkeys < state->maxkeyvals) {
		state->keys[state->numkeys].qk_idtype = idtype;
		state->keys[state->numkeys].qk_id = id;
		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
		KASSERT(state->skiplast);
/*
 * Callback function for getting IDs. Update counting and call addid.
 */
q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
    struct quota2_entry *q2ep, uint64_t off, void *v)
	struct q2cursor_getids *gi = v;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	if (gi->skipped < gi->skip) {

	id = ulfs_rw32(q2ep->q2e_uid, needswap);
	q2cursor_addid(gi->state, gi->idtype, id);

	if (gi->state->numids >= gi->state->maxids) {
		/* got enough ids, stop now */
/*
 * Fill in a batch of quotakeys by scanning one or more hash chains.
 */
q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
    struct q2cursor_state *state,
    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	struct quota2_header *q2h;
	int quota2_hash_size;
	struct q2cursor_getids gi;

	/*
	 * Read the header block.
	 */
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
		mutex_exit(&lfs_dqlock);

	/* if the table size has changed, make the caller start over */
	quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
	if (cursor->q2c_hashsize == 0) {
		cursor->q2c_hashsize = quota2_hash_size;
	} else if (cursor->q2c_hashsize != quota2_hash_size) {

	/* grab the entry with the default values out of the header */
	lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);

	/* If we haven't done the defaults yet, that goes first. */
	if (cursor->q2c_defaults_done == 0) {
		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
		/* if we read both halves, mark it done */
		if (state->numids < state->maxids || !state->skiplast) {
			cursor->q2c_defaults_done = 1;

	while (state->numids < state->maxids) {
		if (cursor->q2c_hashpos >= quota2_hash_size) {
			/* nothing more left */

		/* scan this hash chain */
		gi.skip = cursor->q2c_uidpos;
		gi.new_skip = gi.skip;

		offset = q2h->q2h_entries[cursor->q2c_hashpos];

		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
		    q2cursor_getids_callback);
		KASSERT(error != Q2WL_ABORT);

			/* callback stopped before reading whole chain */
			cursor->q2c_uidpos = gi.new_skip;
			/* if we didn't get both halves, back up */
			if (state->numids == state->maxids && state->skiplast) {
				KASSERT(cursor->q2c_uidpos > 0);
				cursor->q2c_uidpos--;

			/* read whole chain */
			/* if we got both halves of the last id, advance */
			if (state->numids < state->maxids || !state->skiplast) {
				cursor->q2c_uidpos = 0;
				cursor->q2c_hashpos++;

	mutex_exit(&lfs_dqlock);

	*hashsize_ret = quota2_hash_size;
/*
 * Fetch the quotavals for the quotakeys.
 */
q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
    const struct quota2_entry *default_q2e)
	struct quota2_entry q2e;

	for (pos = 0; pos < state->numkeys; pos++) {
		id = state->keys[pos].qk_id;
		if (!hasid || id != loadedid) {
			if (id == QUOTA_DEFAULTID) {
				error = quota2_fetch_q2e(ump,
				if (error == ENOENT) {
					/* something changed - start over */

		objtype = state->keys[pos].qk_objtype;
		KASSERT(objtype >= 0 && objtype < N_QL);
		q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
/*
 * We can't just read keys and values directly, because we can't walk
 * the list with dqlock and grab dq_interlock to read the entries at
 * the same time. So we're going to do two passes: one to figure out
 * which IDs we want and fill in the keys, and then a second to use
 * the keys to fetch the values.
 */
lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
    struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
	struct ulfsq2_cursor *cursor;
	struct ulfsq2_cursor newcursor;
	struct q2cursor_state state;
	struct quota2_entry default_q2e;
	int quota2_hash_size;

	/*
	 * Convert and validate the cursor.
	 */
	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);

	/*
	 * Make sure our on-disk codes match the values of the
	 * FS-independent ones. This avoids the need for explicit
	 * conversion (which would be a NOP anyway and thus easily
	 * left out or called in the wrong places...)
	 */
	CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
	CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	/*
	 * If some of the idtypes aren't configured/enabled, arrange
	 * to skip over them.
	 */
	if (cursor->q2c_users_done == 0 &&
	    ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
		cursor->q2c_users_done = 1;
	if (cursor->q2c_groups_done == 0 &&
	    ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
		cursor->q2c_groups_done = 1;

	/* Loop over, potentially, both idtypes */

		/* Choose id type */
		error = q2cursor_pickidtype(cursor, &idtype);
		if (error == EAGAIN) {
			/* nothing more to do, return 0 */
		KASSERT(ump->um_quotas[idtype] != NULLVP);

		/*
		 * Initialize the per-call iteration state. Copy the
		 * cursor state so we can update it in place but back
		 */
		q2cursor_initstate(&state, keys, vals, maxreturn,
		    cursor->q2c_blocks_done);
		newcursor = *cursor;

		error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
		    &quota2_hash_size, &default_q2e);

		/* Now fill in the values. */
		error = q2cursor_getvals(ump, &state, &default_q2e);

		/*
		 * Now that we aren't going to fail and lose what we
		 * did so far, we can update the cursor state.
		 */
		if (newcursor.q2c_hashpos >= quota2_hash_size) {
			if (idtype == QUOTA_IDTYPE_USER)
				cursor->q2c_users_done = 1;
				cursor->q2c_groups_done = 1;

			/* start over on another id type */
			cursor->q2c_hashsize = 0;
			cursor->q2c_defaults_done = 0;
			cursor->q2c_hashpos = 0;
			cursor->q2c_uidpos = 0;
			cursor->q2c_blocks_done = 0;
			*cursor = newcursor;
			cursor->q2c_blocks_done = state.skiplast;

		/*
		 * If we have something to return, return it.
		 * Otherwise, continue to the other idtype, if any,
		 * and only return zero at end of iteration.
		 */
		if (state.numkeys > 0) {
	*ret = state.numkeys;
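/*
 * Sketch of the expected calling sequence for the cursor handlers above
 * and below (illustrative only; in practice they are reached through the
 * fs-independent quotactl plumbing, not called directly, and the final
 * out-parameters shown here are assumptions based on how the handlers
 * fill them in).  Error handling is omitted.
 */
#if 0
	struct quotakcursor qkc;
	struct quotakey keys[8];
	struct quotaval vals[8];
	unsigned n;
	int at_end = 0;

	lfsquota2_handle_cmd_cursoropen(ump, &qkc);
	while (!at_end) {
		lfsquota2_handle_cmd_cursorget(ump, &qkc, keys, vals, 8, &n);
		/* consume n key/value pairs here ... */
		lfsquota2_handle_cmd_cursoratend(ump, &qkc, &at_end);
	}
	lfsquota2_handle_cmd_cursorclose(ump, &qkc);
#endif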
lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
	struct ulfsq2_cursor *cursor;

	CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
	cursor = Q2CURSOR(qkc);

	cursor->q2c_magic = Q2C_MAGIC;
	cursor->q2c_hashsize = 0;

	cursor->q2c_users_done = 0;
	cursor->q2c_groups_done = 0;
	cursor->q2c_defaults_done = 0;
	cursor->q2c_hashpos = 0;
	cursor->q2c_uidpos = 0;
	cursor->q2c_blocks_done = 0;
lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
	struct ulfsq2_cursor *cursor;

	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);
lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
    struct quotakcursor *qkc, int idtype)
	struct ulfsq2_cursor *cursor;

	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);

	case QUOTA_IDTYPE_USER:
		cursor->q2c_users_done = 1;
	case QUOTA_IDTYPE_GROUP:
		cursor->q2c_groups_done = 1;
lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
	struct ulfsq2_cursor *cursor;

	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);

	*ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
	struct ulfsq2_cursor *cursor;

	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);

	cursor->q2c_hashsize = 0;

	cursor->q2c_users_done = 0;
	cursor->q2c_groups_done = 0;
	cursor->q2c_defaults_done = 0;
	cursor->q2c_hashpos = 0;
	cursor->q2c_uidpos = 0;
	cursor->q2c_blocks_done = 0;
lfs_q2sync(struct mount *mp)
struct dq2get_callback {

dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
	struct dq2get_callback *c = v;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
		KASSERT(mutex_owned(&c->dq->dq_interlock));
		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
		blkoff = (off & ump->umq2_bmask);
		c->dq->dq2_lblkno = lblkno;
		c->dq->dq2_blkoff = blkoff;
lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
	struct quota2_header *q2h;
	struct dq2get_callback c = {

	KASSERT(mutex_owned(&dq->dq_interlock));
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, type, &bp, &q2h, 0);

	/* look for our entry */
	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	offset = q2h->q2h_entries[id & hash_mask];
	error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,

	mutex_exit(&lfs_dqlock);
lfs_dq2sync(struct vnode *vp, struct dquot *dq)
lfs_quota2_mount(struct mount *mp)
	struct ulfsmount *ump = VFSTOULFS(mp);
	struct lfs *fs = ump->um_lfs;
	struct lwp *l = curlwp;

	if ((fs->lfs_use_quota2) == 0)

	fs->um_flags |= ULFS_QUOTA2;
	ump->umq2_bsize = fs->lfs_bsize;
	ump->umq2_bmask = fs->lfs_bmask;
	if (fs->lfs_quota_magic != Q2_HEAD_MAGIC) {
		printf("%s: Invalid quota magic number\n",
		    mp->mnt_stat.f_mntonname);
	if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA)) &&
	    fs->lfs_quotaino[ULFS_USRQUOTA] == 0) {
		printf("%s: no user quota inode\n",
		    mp->mnt_stat.f_mntonname);
	if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA)) &&
	    fs->lfs_quotaino[ULFS_GRPQUOTA] == 0) {
		printf("%s: no group quota inode\n",
		    mp->mnt_stat.f_mntonname);

	if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA) &&
	    ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
		error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_USRQUOTA], &vp);
			printf("%s: can't vget() user quota inode: %d\n",
			    mp->mnt_stat.f_mntonname, error);
		ump->um_quotas[ULFS_USRQUOTA] = vp;
		ump->um_cred[ULFS_USRQUOTA] = l->l_cred;
		mutex_enter(vp->v_interlock);
		mutex_exit(vp->v_interlock);

	if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA) &&
	    ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
		error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_GRPQUOTA], &vp);
			vn_close(ump->um_quotas[ULFS_USRQUOTA],
			    FREAD|FWRITE, l->l_cred);
			printf("%s: can't vget() group quota inode: %d\n",
			    mp->mnt_stat.f_mntonname, error);
		ump->um_quotas[ULFS_GRPQUOTA] = vp;
		ump->um_cred[ULFS_GRPQUOTA] = l->l_cred;
		mutex_enter(vp->v_interlock);
		vp->v_vflag |= VV_SYSTEM;
		mutex_exit(vp->v_interlock);

	mp->mnt_flag |= MNT_QUOTA;