/*	$NetBSD: ulfs_quota2.c,v 1.21 2015/07/28 05:09:35 dholland Exp $	*/
/*  from NetBSD: ufs_quota2.c,v 1.35 2012/09/27 07:47:56 bouyer Exp */
/*  from NetBSD: ffs_quota2.c,v 1.4 2011/06/12 03:36:00 rmind Exp */
/*
 * Copyright (c) 2010 Manuel Bouyer
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.21 2015/07/28 05:09:35 dholland Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fstrans.h>
#include <sys/kauth.h>
#include <sys/wapbl.h>
#include <sys/quota.h>
#include <sys/quotactl.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_extern.h>

#include <ufs/lfs/ulfs_quota2.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_bswap.h>
#include <ufs/lfs/ulfs_extern.h>
#include <ufs/lfs/ulfs_quota.h>
/*
 * Data in the entries are protected by the associated struct dquot's
 * dq_interlock (this means we can't read or change a quota entry without
 * grabbing a dquot for it).
 * The header and lists (including pointers in the data entries, and q2e_uid)
 * are protected by the global dqlock.
 * The locking order is dq_interlock -> dqlock.
 */
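/*
 * Illustrative sketch only (not taken from this file): how a caller
 * typically honors the ordering rule above when it needs both an
 * entry and the header/lists, as the callers of quota2_q2ealloc() do.
 *
 *	mutex_enter(&dq->dq_interlock);		entry data
 *	mutex_enter(&lfs_dqlock);		header and list pointers
 *	... read or update the entry and the lists ...
 *	mutex_exit(&lfs_dqlock);
 *	mutex_exit(&dq->dq_interlock);
 */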
static int quota2_bwrite(struct mount *, struct buf *);
static int getinoquota2(struct inode *, bool, bool, struct buf **,
    struct quota2_entry **);
static int getq2h(struct ulfsmount *, int, struct buf **,
    struct quota2_header **, int);
static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
    struct quota2_entry **, int);
static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
    uint64_t *, int, void *,
    int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
        uint64_t, void *));

static const char *limnames[] = INITQLNAMES;
quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
    struct quota2_entry *q2e)
        /* make sure we can index q2e_val[] by the fs-independent objtype */
        CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
        CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

        q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
        q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
        q2e->q2e_val[objtype].q2v_grace = val->qv_grace;

/*
 * Convert internal representation to FS-independent representation.
 * (Note that while the two types are currently identical, the
 * internal representation is an on-disk struct and the FS-independent
 * representation is not, and they might diverge in the future.)
 */
q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
        qv->qv_softlimit = q2v->q2v_softlimit;
        qv->qv_hardlimit = q2v->q2v_hardlimit;
        qv->qv_usage = q2v->q2v_cur;
        qv->qv_expiretime = q2v->q2v_time;
        qv->qv_grace = q2v->q2v_grace;
/*
 * Convert a quota2entry and default-flag to the FS-independent
 * representation.
 */
q2e_to_quotaval(struct quota2_entry *q2e, int def,
    id_t *id, int objtype, struct quotaval *ret)
                *id = QUOTA_DEFAULTID;
        KASSERT(objtype >= 0 && objtype < N_QL);
        q2val_to_quotaval(&q2e->q2e_val[objtype], ret);

quota2_bwrite(struct mount *mp, struct buf *bp)
        if (mp->mnt_flag & MNT_SYNCHRONOUS)

getq2h(struct ulfsmount *ump, int type,
    struct buf **bpp, struct quota2_header **q2hp, int flags)
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);
        struct quota2_header *q2h;

        KASSERT(mutex_owned(&lfs_dqlock));
        error = bread(ump->um_quotas[type], 0, ump->umq2_bsize, flags, &bp);
        if (bp->b_resid != 0)
                panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);
        q2h = (void *)bp->b_data;
        if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
            q2h->q2h_type != type)
                panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
    struct buf **bpp, struct quota2_entry **q2ep, int flags)
        if (blkoffset & (sizeof(uint64_t) - 1)) {
                panic("dq2get: %s quota file corrupted",
                    lfs_quotatypes[type]);
        error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize, flags, &bp);
        if (bp->b_resid != 0) {
                panic("dq2get: %s quota file corrupted",
                    lfs_quotatypes[type]);
        *q2ep = (void *)((char *)bp->b_data + blkoffset);
/* walk a quota entry list, calling the callback for each entry */
#define Q2WL_ABORT	0x10000000
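/*
 * Hedged sketch of the callback contract, as it can be read from the walk
 * loop below (see the `ret & Q2WL_ABORT` test) and from the callbacks later
 * in this file: returning 0 lets the walk continue along the chain, any
 * nonzero value stops it, and the Q2WL_ABORT bit marks "stop early, not an
 * error". The helper name and test below are hypothetical, for illustration
 * only:
 *
 *	static int
 *	example_callback(struct ulfsmount *ump, uint64_t *offp,
 *	    struct quota2_entry *q2e, uint64_t off, void *v)
 *	{
 *		if (entry_is_interesting(q2e))	// hypothetical predicate
 *			return Q2WL_ABORT;	// found it, stop the walk
 *		return 0;			// keep walking the chain
 *	}
 */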
quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
    uint64_t *offp, int flags, void *a,
    int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);
        daddr_t off = ulfs_rw64(*offp, needswap);
        struct buf *bp, *obp = hbp;
        int ret = 0, ret2 = 0;
        struct quota2_entry *q2e;
        daddr_t lblkno, blkoff, olblkno = 0;

        KASSERT(mutex_owner(&lfs_dqlock));

                lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
                blkoff = (off & ump->umq2_bmask);
                        /* in the header block */
                } else if (lblkno == olblkno) {
                        /* still in the same buf */
                        ret = bread(ump->um_quotas[type], lblkno,
                            ump->umq2_bsize, flags, &bp);
                        if (bp->b_resid != 0) {
                                panic("quota2_walk_list: %s quota file corrupted",
                                    lfs_quotatypes[type]);
                q2e = (void *)((char *)(bp->b_data) + blkoff);
                ret = (*func)(ump, offp, q2e, off, a);
                if (off != ulfs_rw64(*offp, needswap)) {
                        /* callback changed parent's pointer, redo */
                        off = ulfs_rw64(*offp, needswap);
                        if (bp != hbp && bp != obp)
                /* parent is now current */
                if (obp != bp && obp != hbp) {
                        if (flags & B_MODIFY)
                offp = &(q2e->q2e_next);
                off = ulfs_rw64(*offp, needswap);
        if (flags & B_MODIFY)
        if (ret & Q2WL_ABORT)
lfsquota2_umount(struct mount *mp, int flags)
        struct ulfsmount *ump = VFSTOULFS(mp);
        struct lfs *fs = ump->um_lfs;

        if ((fs->um_flags & ULFS_QUOTA2) == 0)

        for (i = 0; i < ULFS_MAXQUOTAS; i++) {
                if (ump->um_quotas[i] != NULLVP) {
                        error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
                                printf("quota2_umount failed: close(%p) %d\n",
                                    ump->um_quotas[i], error);
                ump->um_quotas[i] = NULLVP;
quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
        struct buf *hbp, *bp;
        struct quota2_header *q2h;
        struct quota2_entry *q2e;
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        KASSERT(mutex_owned(&dq->dq_interlock));
        KASSERT(mutex_owned(&lfs_dqlock));
        error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
        offset = ulfs_rw64(q2h->q2h_free, needswap);
                struct vnode *vp = ump->um_quotas[type];
                struct inode *ip = VTOI(vp);
                uint64_t size = ip->i_size;
                /* need to allocate a new disk block */
                error = lfs_balloc(vp, size, ump->umq2_bsize,
                    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
                KASSERT((ip->i_size % ump->umq2_bsize) == 0);
                ip->i_size += ump->umq2_bsize;
                DIP_ASSIGN(ip, size, ip->i_size);
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
                uvm_vnp_setsize(vp, ip->i_size);
                lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
                error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
                if (error || error2) {
                offset = ulfs_rw64(q2h->q2h_free, needswap);
                KASSERT(offset != 0);
        dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
        dq->dq2_blkoff = (offset & ump->umq2_bmask);
        if (dq->dq2_lblkno == 0) {
                q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
                error = getq2e(ump, type, dq->dq2_lblkno,
                    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
        hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
        /* remove from free list */
        q2h->q2h_free = q2e->q2e_next;
        memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
        q2e->q2e_uid = ulfs_rw32(uid, needswap);
        /* insert in hash list */
        q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
        q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
    struct quota2_entry **q2ep)
        struct ulfsmount *ump = ip->i_ump;
        u_int32_t ino_ids[ULFS_MAXQUOTAS];

        error = lfs_getinoquota(ip);
        ino_ids[ULFS_USRQUOTA] = ip->i_uid;
        ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
        /* first get the interlock for all dquot */
        for (i = 0; i < ULFS_MAXQUOTAS; i++) {
                mutex_enter(&dq->dq_interlock);
        /* now get the corresponding quota entry */
        for (i = 0; i < ULFS_MAXQUOTAS; i++) {
                if (__predict_false(ump->um_quotas[i] == NULL)) {
                        /*
                         * quotas have been turned off. This can happen
                         */
                        mutex_exit(&dq->dq_interlock);
                        lfs_dqrele(NULLVP, dq);
                        ip->i_dquot[i] = NULL;
                if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
                        /* need to alloc a new on-disk quot */
                        mutex_enter(&lfs_dqlock);
                        error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
                        mutex_exit(&lfs_dqlock);
                KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
                error = getq2e(ump, i, dq->dq2_lblkno,
                    dq->dq2_blkoff, &bpp[i], &q2ep[i],
                    modify ? B_MODIFY : 0);
__inline static int __unused
lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
        return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
            q2v->q2v_hardlimit, q2v->q2v_time, now);
quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
        struct buf *bp[ULFS_MAXQUOTAS];
        struct quota2_entry *q2e[ULFS_MAXQUOTAS];
        struct quota2_val *q2vp;
        struct ulfsmount *ump = ip->i_ump;
        struct lfs *fs = ip->i_lfs;
        struct mount *mp = ump->um_mountp;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
        for (i = 0; i < ULFS_MAXQUOTAS; i++) {
                mutex_exit(&dq->dq_interlock);
        for (i = 0; i < ULFS_MAXQUOTAS; i++) {
                if (q2e[i] == NULL) {
                        mutex_exit(&dq->dq_interlock);
                q2vp = &q2e[i]->q2e_val[vtype];
                ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
                if (ncurblks < -change)
                q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
                quota2_bwrite(mp, bp[i]);
                mutex_exit(&dq->dq_interlock);
        /* see if the allocation is allowed */
        for (i = 0; i < ULFS_MAXQUOTAS; i++) {
                struct quota2_val q2v;
                KASSERT(q2e[i] != NULL);
                lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
                ql_stat = lfsquota2_check_limit(&q2v, change, time_second);
                if ((flags & FORCE) == 0 &&
                    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
                    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
                    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
                        /* enforce this limit */
                        switch(QL_STATUS(ql_stat)) {
                                if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
                                        uprintf("\n%s: write failed, %s %s "
                                            mp->mnt_stat.f_mntonname,
                                            lfs_quotatypes[i], limnames[vtype]);
                                        dq->dq_flags |= DQ_WARN(vtype);
                        case QL_S_DENY_GRACE:
                                if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
                                        uprintf("\n%s: write failed, %s %s "
                                            mp->mnt_stat.f_mntonname,
                                            lfs_quotatypes[i], limnames[vtype]);
                                        dq->dq_flags |= DQ_WARN(vtype);
                        case QL_S_ALLOW_SOFT:
                                if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
                                        uprintf("\n%s: warning, %s %s "
                                            mp->mnt_stat.f_mntonname,
                                            lfs_quotatypes[i], limnames[vtype]);
                                        dq->dq_flags |= DQ_WARN(vtype);
                /*
                 * always do this; we don't know if the allocation will
                 * succeed or not in the end. if we don't do the allocation
                 * q2v_time will be ignored anyway
                 */
                if (ql_stat & QL_F_CROSS) {
                        q2v.q2v_time = time_second + q2v.q2v_grace;
                        lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
        /* now do the allocation if allowed */
        for (i = 0; i < ULFS_MAXQUOTAS; i++) {
                KASSERT(q2e[i] != NULL);
                q2vp = &q2e[i]->q2e_val[vtype];
                ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
                q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
                quota2_bwrite(mp, bp[i]);
        mutex_exit(&dq->dq_interlock);
lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
        return quota2_check(ip, QL_BLOCK, change, cred, flags);

lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
        return quota2_check(ip, QL_FILE, change, cred, flags);
lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
        struct quota2_header *q2h;
        struct quota2_entry q2e, *q2ep;
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        /* make sure we can index by the fs-independent idtype */
        CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
        CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);

        if (ump->um_quotas[key->qk_idtype] == NULLVP)

        if (key->qk_id == QUOTA_DEFAULTID) {
                mutex_enter(&lfs_dqlock);
                error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
                        mutex_exit(&lfs_dqlock);
                lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
                quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
                lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
                mutex_exit(&lfs_dqlock);
                quota2_bwrite(ump->um_mountp, bp);
        error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
        mutex_enter(&dq->dq_interlock);
        if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
                /* need to alloc a new on-disk quot */
                mutex_enter(&lfs_dqlock);
                error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
                mutex_exit(&lfs_dqlock);
        KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
        error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
            dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
        lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
        quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
        lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
        quota2_bwrite(ump->um_mountp, bp);
        mutex_exit(&dq->dq_interlock);
        lfs_dqrele(NULLVP, dq);
struct dq2clear_callback {
        struct quota2_header *q2h;

dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
        struct dq2clear_callback *c = v;
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
                KASSERT(mutex_owned(&c->dq->dq_interlock));
                c->dq->dq2_lblkno = 0;
                c->dq->dq2_blkoff = 0;
                /* remove from hash list */
                *offp = q2e->q2e_next;
                /* add to free list */
                q2e->q2e_next = c->q2h->q2h_free;
                c->q2h->q2h_free = myoff;
lfsquota2_handle_cmd_del(struct ulfsmount *ump, const struct quotakey *qk)
        int error, i, canfree;
        struct quota2_header *q2h;
        struct quota2_entry q2e, *q2ep;
        struct buf *hbp, *bp;
        struct dq2clear_callback c;

        idtype = qk->qk_idtype;
        objtype = qk->qk_objtype;
        if (ump->um_quotas[idtype] == NULLVP)
        if (id == QUOTA_DEFAULTID)

        /* get the default entry before locking the entry's buffer */
        mutex_enter(&lfs_dqlock);
        error = getq2h(ump, idtype, &hbp, &q2h, 0);
                mutex_exit(&lfs_dqlock);
        /* we'll copy to another disk entry, so no need to swap */
        memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
        mutex_exit(&lfs_dqlock);

        error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
        mutex_enter(&dq->dq_interlock);
        if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
                /* already clear, nothing to do */
        error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
            &bp, &q2ep, B_MODIFY);

        /* make sure we can index by the objtype passed in */
        CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
        CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

        /* clear the requested objtype by copying from the default entry */
        q2ep->q2e_val[objtype].q2v_softlimit =
            q2e.q2e_val[objtype].q2v_softlimit;
        q2ep->q2e_val[objtype].q2v_hardlimit =
            q2e.q2e_val[objtype].q2v_hardlimit;
        q2ep->q2e_val[objtype].q2v_grace =
            q2e.q2e_val[objtype].q2v_grace;
        q2ep->q2e_val[objtype].q2v_time = 0;

        /* if this entry now contains no information, we can free it */
        for (i = 0; i < N_QL; i++) {
                if (q2ep->q2e_val[i].q2v_cur != 0 ||
                    (q2ep->q2e_val[i].q2v_softlimit !=
                     q2e.q2e_val[i].q2v_softlimit) ||
                    (q2ep->q2e_val[i].q2v_hardlimit !=
                     q2e.q2e_val[i].q2v_hardlimit) ||
                    (q2ep->q2e_val[i].q2v_grace !=
                     q2e.q2e_val[i].q2v_grace)) {
        /* note: do not need to check q2v_time */
                quota2_bwrite(ump->um_mountp, bp);
        /* we can free it. release bp so we can walk the list */
        mutex_enter(&lfs_dqlock);
        error = getq2h(ump, idtype, &hbp, &q2h, 0);
        hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
        error = quota2_walk_list(ump, hbp, idtype,
            &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
        mutex_exit(&lfs_dqlock);
        mutex_exit(&dq->dq_interlock);
        lfs_dqrele(NULLVP, dq);
quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
    struct quota2_entry *ret)
        struct quota2_entry *q2ep;
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
        mutex_enter(&dq->dq_interlock);
        if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
                mutex_exit(&dq->dq_interlock);
                lfs_dqrele(NULLVP, dq);
        error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
                mutex_exit(&dq->dq_interlock);
                lfs_dqrele(NULLVP, dq);
        lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
        mutex_exit(&dq->dq_interlock);
        lfs_dqrele(NULLVP, dq);
quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
    struct quotaval *ret)
        struct quota2_entry *q2ep, q2e;
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
        mutex_enter(&dq->dq_interlock);
        if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
                mutex_exit(&dq->dq_interlock);
                lfs_dqrele(NULLVP, dq);
        error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
                mutex_exit(&dq->dq_interlock);
                lfs_dqrele(NULLVP, dq);
        lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
        mutex_exit(&dq->dq_interlock);
        lfs_dqrele(NULLVP, dq);
        q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
        KASSERT(id2 == qk->qk_id);
lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
        struct quota2_header *q2h;
        struct quota2_entry q2e;
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        /*
         * Make sure the FS-independent codes match the internal ones,
         * so we can use the passed-in objtype without having to
         * convert it explicitly to QL_BLOCK/QL_FILE.
         */
        CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
        CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);

        if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
        if (ump->um_quotas[qk->qk_idtype] == NULLVP)
        if (qk->qk_id == QUOTA_DEFAULTID) {
                mutex_enter(&lfs_dqlock);
                error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
                        mutex_exit(&lfs_dqlock);
                lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
                mutex_exit(&lfs_dqlock);
                q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
                error = quota2_fetch_quotaval(ump, qk, qv);
/*
 * Cursor structure we used.
 *
 * This will get stored in userland between calls so we must not assume
 * it isn't arbitrarily corrupted.
 */
struct ulfsq2_cursor {
        uint32_t q2c_magic;	/* magic number */
        int q2c_hashsize;	/* size of hash table at last go */

        int q2c_users_done;	/* true if we've returned all user data */
        int q2c_groups_done;	/* true if we've returned all group data */
        int q2c_defaults_done;	/* true if we've returned the default values */
        int q2c_hashpos;	/* slot to start at in hash table */
        int q2c_uidpos;		/* number of ids we've handled */
        int q2c_blocks_done;	/* true if we've returned the blocks value */
/*
 * State of a single cursorget call, or at least the part of it that
 * needs to be passed around.
 */
struct q2cursor_state {
        /* data return pointers */
        struct quotakey *keys;
        struct quotaval *vals;

        /* key/value counters */
        unsigned numkeys;	/* number of keys assigned */

        /* ID to key/value conversion state */
        int skipfirst;		/* if true skip first key/value */
        int skiplast;		/* if true skip last key/value */

        unsigned maxids;	/* maximum number of IDs to handle */
        unsigned numids;	/* number of IDs handled */

/*
 * Additional structure for getids callback.
 */
struct q2cursor_getids {
        struct q2cursor_state *state;
        unsigned skip;		/* number of ids to skip over */
        unsigned new_skip;	/* number of ids to skip over next time */
        unsigned skipped;	/* number skipped so far */
        int stopped;		/* true if we stopped quota_walk_list early */

/*
 * Cursor-related functions
 */

#define Q2C_MAGIC	(0xbeebe111)

/* extract cursor from caller form */
#define Q2CURSOR(qkc)	((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
/*
 * Check that a cursor we're handed is something like valid. If
 * someone munges it and it still passes these checks, they'll get
 * partial or odd results back but won't break anything.
 */
q2cursor_check(struct ulfsq2_cursor *cursor)
        if (cursor->q2c_magic != Q2C_MAGIC) {
        if (cursor->q2c_hashsize < 0) {
        if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
        if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
        if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
        if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
        if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
/*
 * Set up the q2cursor state.
 */
q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
    struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
        state->maxkeyvals = maxkeyvals;
        /*
         * For each ID there are two quotavals to return. If the
         * maximum number of entries to return is odd, we might want
         * to skip the first quotaval of the first ID, or the last
         * quotaval of the last ID, but not both. So the number of IDs
         * we want is (up to) half the number of return slots we have,
         * rounded up.
         */
        state->maxids = (state->maxkeyvals + 1) / 2;
        if (state->maxkeyvals % 2) {
                        state->skipfirst = 1;
                        state->skiplast = 0;
                        state->skipfirst = 0;
                        state->skiplast = 1;
                state->skipfirst = 0;
                state->skiplast = 0;
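/*
 * Worked example of the bookkeeping above (added for clarity, not part of
 * the upstream code): with maxkeyvals == 5 there are 5 key/value slots, so
 * maxids == (5 + 1) / 2 == 3. Three IDs would produce 6 quotavals (a BLOCKS
 * and a FILES entry each), one too many, so either the first ID's blocks
 * half is skipped (when a previous call already returned it, per the
 * blocks_done flag) or the last ID's files half is skipped and picked up by
 * the next call. With an even maxkeyvals, e.g. 4, maxids == 2 and nothing
 * is skipped.
 */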
/*
 * Choose which idtype we're going to work on. If doing a full
 * iteration, we do users first, then groups, but either might be
 * disabled or marked to skip via cursorsetidtype(), so don't make
 * silly assumptions.
 */
q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
        if (cursor->q2c_users_done == 0) {
                *idtype_ret = QUOTA_IDTYPE_USER;
        } else if (cursor->q2c_groups_done == 0) {
                *idtype_ret = QUOTA_IDTYPE_GROUP;
/*
 * Add an ID to the current state. Sets up either one or two keys to
 * refer to it, depending on whether it's first/last and the setting
 * of skipfirst. (skiplast does not need to be explicitly tested)
 */
q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
        KASSERT(state->numids < state->maxids);
        KASSERT(state->numkeys < state->maxkeyvals);

        if (!state->skipfirst || state->numkeys > 0) {
                state->keys[state->numkeys].qk_idtype = idtype;
                state->keys[state->numkeys].qk_id = id;
                state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
        if (state->numkeys < state->maxkeyvals) {
                state->keys[state->numkeys].qk_idtype = idtype;
                state->keys[state->numkeys].qk_id = id;
                state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
                KASSERT(state->skiplast);
/*
 * Callback function for getting IDs. Update counting and call addid.
 */
q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
    struct quota2_entry *q2ep, uint64_t off, void *v)
        struct q2cursor_getids *gi = v;
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        if (gi->skipped < gi->skip) {
        id = ulfs_rw32(q2ep->q2e_uid, needswap);
        q2cursor_addid(gi->state, gi->idtype, id);
        if (gi->state->numids >= gi->state->maxids) {
                /* got enough ids, stop now */
/*
 * Fill in a batch of quotakeys by scanning one or more hash chains.
 */
q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
    struct q2cursor_state *state,
    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);
        struct quota2_header *q2h;
        int quota2_hash_size;
        struct q2cursor_getids gi;

        /*
         * Read the header block.
         */
        mutex_enter(&lfs_dqlock);
        error = getq2h(ump, idtype, &hbp, &q2h, 0);
                mutex_exit(&lfs_dqlock);

        /* if the table size has changed, make the caller start over */
        quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
        if (cursor->q2c_hashsize == 0) {
                cursor->q2c_hashsize = quota2_hash_size;
        } else if (cursor->q2c_hashsize != quota2_hash_size) {

        /* grab the entry with the default values out of the header */
        lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);

        /* If we haven't done the defaults yet, that goes first. */
        if (cursor->q2c_defaults_done == 0) {
                q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
                /* if we read both halves, mark it done */
                if (state->numids < state->maxids || !state->skiplast) {
                        cursor->q2c_defaults_done = 1;

        while (state->numids < state->maxids) {
                if (cursor->q2c_hashpos >= quota2_hash_size) {
                        /* nothing more left */

                /* scan this hash chain */
                gi.skip = cursor->q2c_uidpos;
                gi.new_skip = gi.skip;

                offset = q2h->q2h_entries[cursor->q2c_hashpos];

                error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
                    q2cursor_getids_callback);
                KASSERT(error != Q2WL_ABORT);
                        /* callback stopped before reading whole chain */
                        cursor->q2c_uidpos = gi.new_skip;
                        /* if we didn't get both halves, back up */
                        if (state->numids == state->maxids && state->skiplast) {
                                KASSERT(cursor->q2c_uidpos > 0);
                                cursor->q2c_uidpos--;
                        /* read whole chain */
                        /* if we got both halves of the last id, advance */
                        if (state->numids < state->maxids || !state->skiplast) {
                                cursor->q2c_uidpos = 0;
                                cursor->q2c_hashpos++;

        mutex_exit(&lfs_dqlock);

        *hashsize_ret = quota2_hash_size;
/*
 * Fetch the quotavals for the quotakeys.
 */
q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
    const struct quota2_entry *default_q2e)
        struct quota2_entry q2e;

        for (pos = 0; pos < state->numkeys; pos++) {
                id = state->keys[pos].qk_id;
                if (!hasid || id != loadedid) {
                        if (id == QUOTA_DEFAULTID) {
                                error = quota2_fetch_q2e(ump,
                                if (error == ENOENT) {
                                        /* something changed - start over */
                objtype = state->keys[pos].qk_objtype;
                KASSERT(objtype >= 0 && objtype < N_QL);
                q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
/*
 * We can't just read keys and values directly, because we can't walk
 * the list with dqlock and grab dq_interlock to read the entries at
 * the same time. So we're going to do two passes: one to figure out
 * which IDs we want and fill in the keys, and then a second to use
 * the keys to fetch the values.
 */
lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
    struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
        struct ulfsq2_cursor *cursor;
        struct ulfsq2_cursor newcursor;
        struct q2cursor_state state;
        struct quota2_entry default_q2e;
        int quota2_hash_size = 0; /* XXXuninit */

        /*
         * Convert and validate the cursor.
         */
        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);

        /*
         * Make sure our on-disk codes match the values of the
         * FS-independent ones. This avoids the need for explicit
         * conversion (which would be a NOP anyway and thus easily
         * left out or called in the wrong places...)
         */
        CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
        CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
        CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
        CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

        /*
         * If some of the idtypes aren't configured/enabled, arrange
         * to skip over them.
         */
        if (cursor->q2c_users_done == 0 &&
            ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
                cursor->q2c_users_done = 1;
        if (cursor->q2c_groups_done == 0 &&
            ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
                cursor->q2c_groups_done = 1;

        /* Loop over, potentially, both idtypes */

                /* Choose id type */
                error = q2cursor_pickidtype(cursor, &idtype);
                if (error == EAGAIN) {
                        /* nothing more to do, return 0 */
                KASSERT(ump->um_quotas[idtype] != NULLVP);

                /*
                 * Initialize the per-call iteration state. Copy the
                 * cursor state so we can update it in place but back
                 */
                q2cursor_initstate(&state, keys, vals, maxreturn,
                    cursor->q2c_blocks_done);
                newcursor = *cursor;

                error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
                    &quota2_hash_size, &default_q2e);

                /* Now fill in the values. */
                error = q2cursor_getvals(ump, &state, &default_q2e);

                /*
                 * Now that we aren't going to fail and lose what we
                 * did so far, we can update the cursor state.
                 */
                if (newcursor.q2c_hashpos >= quota2_hash_size) {
                        if (idtype == QUOTA_IDTYPE_USER)
                                cursor->q2c_users_done = 1;
                                cursor->q2c_groups_done = 1;
                        /* start over on another id type */
                        cursor->q2c_hashsize = 0;
                        cursor->q2c_defaults_done = 0;
                        cursor->q2c_hashpos = 0;
                        cursor->q2c_uidpos = 0;
                        cursor->q2c_blocks_done = 0;
                        *cursor = newcursor;
                        cursor->q2c_blocks_done = state.skiplast;

                /*
                 * If we have something to return, return it.
                 * Otherwise, continue to the other idtype, if any,
                 * and only return zero at end of iteration.
                 */
                if (state.numkeys > 0) {
        *ret = state.numkeys;
lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
        struct ulfsq2_cursor *cursor;

        CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
        cursor = Q2CURSOR(qkc);

        cursor->q2c_magic = Q2C_MAGIC;
        cursor->q2c_hashsize = 0;

        cursor->q2c_users_done = 0;
        cursor->q2c_groups_done = 0;
        cursor->q2c_defaults_done = 0;
        cursor->q2c_hashpos = 0;
        cursor->q2c_uidpos = 0;
        cursor->q2c_blocks_done = 0;
lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
        struct ulfsq2_cursor *cursor;

        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);
lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
    struct quotakcursor *qkc, int idtype)
        struct ulfsq2_cursor *cursor;

        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);

        case QUOTA_IDTYPE_USER:
                cursor->q2c_users_done = 1;
        case QUOTA_IDTYPE_GROUP:
                cursor->q2c_groups_done = 1;
lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
        struct ulfsq2_cursor *cursor;

        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);

        *ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
        struct ulfsq2_cursor *cursor;

        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);

        cursor->q2c_hashsize = 0;

        cursor->q2c_users_done = 0;
        cursor->q2c_groups_done = 0;
        cursor->q2c_defaults_done = 0;
        cursor->q2c_hashpos = 0;
        cursor->q2c_uidpos = 0;
        cursor->q2c_blocks_done = 0;
lfs_q2sync(struct mount *mp)
struct dq2get_callback {

dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
        struct dq2get_callback *c = v;
        struct lfs *fs = ump->um_lfs;
        const int needswap = ULFS_MPNEEDSWAP(fs);

        if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
                KASSERT(mutex_owned(&c->dq->dq_interlock));
                lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
                blkoff = (off & ump->umq2_bmask);
                c->dq->dq2_lblkno = lblkno;
                c->dq->dq2_blkoff = blkoff;
lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
        struct quota2_header *q2h;
        struct dq2get_callback c = {

        KASSERT(mutex_owned(&dq->dq_interlock));
        mutex_enter(&lfs_dqlock);
        error = getq2h(ump, type, &bp, &q2h, 0);
        /* look for our entry */
        hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
        offset = q2h->q2h_entries[id & hash_mask];
        error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
        mutex_exit(&lfs_dqlock);
lfs_dq2sync(struct vnode *vp, struct dquot *dq)
lfs_quota2_mount(struct mount *mp)
        struct ulfsmount *ump = VFSTOULFS(mp);
        struct lfs *fs = ump->um_lfs;
        struct lwp *l = curlwp;

        if ((fs->lfs_use_quota2) == 0)

        fs->um_flags |= ULFS_QUOTA2;
        ump->umq2_bsize = lfs_sb_getbsize(fs);
        ump->umq2_bmask = lfs_sb_getbmask(fs);
        if (fs->lfs_quota_magic != Q2_HEAD_MAGIC) {
                printf("%s: Invalid quota magic number\n",
                    mp->mnt_stat.f_mntonname);
        if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA)) &&
            fs->lfs_quotaino[ULFS_USRQUOTA] == 0) {
                printf("%s: no user quota inode\n",
                    mp->mnt_stat.f_mntonname);
        if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA)) &&
            fs->lfs_quotaino[ULFS_GRPQUOTA] == 0) {
                printf("%s: no group quota inode\n",
                    mp->mnt_stat.f_mntonname);

        if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA) &&
            ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
                error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_USRQUOTA], &vp);
                        printf("%s: can't vget() user quota inode: %d\n",
                            mp->mnt_stat.f_mntonname, error);
                ump->um_quotas[ULFS_USRQUOTA] = vp;
                ump->um_cred[ULFS_USRQUOTA] = l->l_cred;
                mutex_enter(vp->v_interlock);
                mutex_exit(vp->v_interlock);
        if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA) &&
            ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
                error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_GRPQUOTA], &vp);
                        vn_close(ump->um_quotas[ULFS_USRQUOTA],
                            FREAD|FWRITE, l->l_cred);
                        printf("%s: can't vget() group quota inode: %d\n",
                            mp->mnt_stat.f_mntonname, error);
                ump->um_quotas[ULFS_GRPQUOTA] = vp;
                ump->um_cred[ULFS_GRPQUOTA] = l->l_cred;
                mutex_enter(vp->v_interlock);
                vp->v_vflag |= VV_SYSTEM;
                mutex_exit(vp->v_interlock);
        mp->mnt_flag |= MNT_QUOTA;