/* kernel/fs/ufs/ufs_acl.c */

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/t_lock.h>
#include <sys/ksynch.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/systm.h>
#include <vm/seg.h>
#include <sys/file.h>
#include <sys/acl.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_acl.h>
#include <sys/fs/ufs_quota.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/policy.h>

/* Cache routines */
static int si_signature(si_t *);
static int si_cachei_get(struct inode *, si_t **);
static int si_cachea_get(struct inode *, si_t *, si_t **);
static int si_cmp(si_t *, si_t *);
static void si_cache_put(si_t *);
void si_cache_del(si_t *, int);
void si_cache_init(void);

static void ufs_si_free_mem(si_t *);
static int ufs_si_store(struct inode *, si_t *, int, cred_t *);
static si_t *ufs_acl_cp(si_t *);
static int ufs_sectobuf(si_t *, caddr_t *, size_t *);
static int acl_count(ufs_ic_acl_t *);
static int acl_validate(aclent_t *, int, int);
static int vsecattr2aclentry(vsecattr_t *, si_t **);
static int aclentry2vsecattr(si_t *, vsecattr_t *);

krwlock_t si_cache_lock;		/* Protects si_cache */
int si_cachecnt = 64;			/* # buckets in si_cache[a|i] */
si_t **si_cachea;			/* The 'by acl' cache chains */
si_t **si_cachei;			/* The 'by inode' cache chains */
long si_cachehit = 0;
long si_cachemiss = 0;

#define	SI_HASH(S)	((int)(S) & (si_cachecnt - 1))
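
/*
 * SI_HASH() simply masks off the low-order bits of its argument, so it
 * distributes entries evenly only because si_cachecnt is a power of two
 * (64 above).  The same hash is used for both cache indexes: the
 * 'by acl' chains are keyed by an si signature, the 'by inode' chains
 * by a shadow inode number.
 */
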
/*
 * Store the new acls in aclp.  Attempts to make things atomic.
 * Search the acl cache for an identical sp and, if found, attach
 * the cache'd acl to ip.  If the acl is new (not in the cache),
 * add it to the cache, then attach it to ip.  Last, remove and
 * decrement the reference count of any prior acl list attached
 * to the ip.
 *
 * Parameters:
 *	ip - Ptr to inode to receive the acl list
 *	sp - Ptr to in-core acl structure to attach to the inode.
 *	puship - 0 do not push the object inode(ip) 1 push the ip
 *	cr - Ptr to credentials
 *
 * Returns:	0 - Success
 *		N - From errno.h
 */
static int
ufs_si_store(struct inode *ip, si_t *sp, int puship, cred_t *cr)
{
	struct vfs *vfsp;
	struct inode *sip;
	si_t *oldsp;
	si_t *csp;
	caddr_t acldata;
	ino_t oldshadow;
	size_t acldatalen;
	off_t offset;
	int shadow;
	int err;
	int refcnt;
	int usecnt;
	int signature;
	int resid;
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct fs *fs = ufsvfsp->vfs_fs;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));
	ASSERT(ip->i_ufs_acl != sp);

	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
		return (ENOSYS);

	/*
	 * if there are only the three owner/group/other then do not
	 * create a shadow inode.  If there is already a shadow inode
	 * associated with the file, remove it.
	 */
	if (!sp->ausers &&
	    !sp->agroups &&
	    !sp->downer &&
	    !sp->dgroup &&
	    !sp->dother &&
	    sp->dclass.acl_ismask == 0 &&
	    !sp->dusers &&
	    !sp->dgroups) {
		if (ip->i_ufs_acl)
			err = ufs_si_free(ip->i_ufs_acl, ITOV(ip)->v_vfsp, cr);
		ip->i_ufs_acl = NULL;
		ip->i_shadow = 0;
		ip->i_flag |= IMOD | IACC;
		ip->i_mode = (ip->i_smode & ~0777) |
		    ((sp->aowner->acl_ic_perm & 07) << 6) |
		    (MASK2MODE(sp)) |
		    (sp->aother->acl_ic_perm & 07);
		TRANS_INODE(ip->i_ufsvfs, ip);
		ufs_iupdat(ip, 1);
		ufs_si_free_mem(sp);
		return (0);
	}

loop:

	/*
	 * Check cache.  If in cache, use existing shadow inode.
	 * Increment the shadow link count, then attach to the
	 * cached ufs_acl_entry struct, and increment its reference
	 * count.  Then discard the passed-in ufs_acl_entry and
	 * return.
	 */
	if (si_cachea_get(ip, sp, &csp) == 0) {
		ASSERT(RW_WRITE_HELD(&csp->s_lock));
		if (ip->i_ufs_acl == csp) {
			rw_exit(&csp->s_lock);
			(void) ufs_si_free_mem(sp);
			return (0);
		}
		vfsp = ITOV(ip)->v_vfsp;
		ASSERT(csp->s_shadow <= INT_MAX);
		shadow = (int)csp->s_shadow;
		/*
		 * We can't call ufs_iget while holding the csp locked,
		 * because we might deadlock.  So we drop the
		 * lock on csp, then go search the si_cache again
		 * to see if the csp is still there.
		 */
		rw_exit(&csp->s_lock);
		if ((err = ufs_iget(vfsp, shadow, &sip, cr)) != 0) {
			(void) ufs_si_free_mem(sp);
			return (EIO);
		}
		rw_enter(&sip->i_contents, RW_WRITER);
		if ((sip->i_mode & IFMT) != IFSHAD || sip->i_nlink <= 0) {
			rw_exit(&sip->i_contents);
			VN_RELE(ITOV(sip));
			goto loop;
		}
		/* Get the csp again */
		if (si_cachea_get(ip, sp, &csp) != 0) {
			rw_exit(&sip->i_contents);
			VN_RELE(ITOV(sip));
			goto loop;
		}
		ASSERT(RW_WRITE_HELD(&csp->s_lock));
		/* See if we got the right shadow */
		if (csp->s_shadow != shadow) {
			rw_exit(&csp->s_lock);
			rw_exit(&sip->i_contents);
			VN_RELE(ITOV(sip));
			goto loop;
		}
		ASSERT(RW_WRITE_HELD(&sip->i_contents));
		ASSERT(sip->i_dquot == 0);
		/* Increment link count */
		ASSERT(sip->i_nlink > 0);
		sip->i_nlink++;
		TRANS_INODE(ufsvfsp, sip);
		csp->s_use = sip->i_nlink;
		csp->s_ref++;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		sip->i_flag |= ICHG | IMOD;
		sip->i_seq++;
		ITIMES_NOLOCK(sip);
		/*
		 * Always release s_lock before both releasing i_contents
		 * and calling VN_RELE.
		 */
		rw_exit(&csp->s_lock);
		rw_exit(&sip->i_contents);
		VN_RELE(ITOV(sip));
		(void) ufs_si_free_mem(sp);
		sp = csp;
		si_cachehit++;
		goto switchshadows;
	}

	/* Alloc a shadow inode and fill it in */
	err = ufs_ialloc(ip, ip->i_number, (mode_t)IFSHAD, &sip, cr);
	if (err) {
		(void) ufs_si_free_mem(sp);
		return (err);
	}
	rw_enter(&sip->i_contents, RW_WRITER);
	sip->i_flag |= IACC | IUPD | ICHG;
	sip->i_seq++;
	sip->i_mode = (o_mode_t)IFSHAD;
	ITOV(sip)->v_type = VREG;
	ufs_reset_vnode(ITOV(sip));
	sip->i_nlink = 1;
	sip->i_uid = crgetuid(cr);
	sip->i_suid = (ulong_t)sip->i_uid > (ulong_t)USHRT_MAX ?
	    UID_LONG : sip->i_uid;
	sip->i_gid = crgetgid(cr);
	sip->i_sgid = (ulong_t)sip->i_gid > (ulong_t)USHRT_MAX ?
	    GID_LONG : sip->i_gid;
	sip->i_shadow = 0;
	TRANS_INODE(ufsvfsp, sip);
	sip->i_ufs_acl = NULL;
	ASSERT(sip->i_size == 0);

	sp->s_shadow = sip->i_number;

	if ((err = ufs_sectobuf(sp, &acldata, &acldatalen)) != 0)
		goto errout;
	offset = 0;

	/*
	 * We don't actually care about the residual count upon failure,
	 * but giving ufs_rdwri() the pointer means it won't translate
	 * all failures to EIO.  Our caller needs to know when ENOSPC
	 * gets hit.
	 */
	resid = 0;
	if (((err = ufs_rdwri(UIO_WRITE, FWRITE|FSYNC, sip, acldata,
	    acldatalen, (offset_t)0, UIO_SYSSPACE, &resid, cr)) != 0) ||
	    (resid != 0)) {
		kmem_free(acldata, acldatalen);
		if ((resid != 0) && (err == 0))
			err = ENOSPC;
		goto errout;
	}

	offset += acldatalen;
	if ((acldatalen + fs->fs_bsize) > ufsvfsp->vfs_maxacl)
		ufsvfsp->vfs_maxacl = acldatalen + fs->fs_bsize;

	kmem_free(acldata, acldatalen);
	/* Sync & free the shadow inode */
	ufs_iupdat(sip, 1);
	rw_exit(&sip->i_contents);
	VN_RELE(ITOV(sip));

	/* We're committed to using this sp */
	sp->s_use = 1;
	sp->s_ref = 1;

	/* Now put the new acl stuff in the cache */
	/* XXX Might make a duplicate */
	si_cache_put(sp);
	si_cachemiss++;

switchshadows:
	/* Now switch the parent inode to use the new shadow inode */
	ASSERT(RW_WRITE_HELD(&ip->i_contents));
	rw_enter(&sp->s_lock, RW_READER);
	oldsp = ip->i_ufs_acl;
	oldshadow = ip->i_shadow;
	ip->i_ufs_acl = sp;
	ASSERT(sp->s_shadow <= INT_MAX);
	ip->i_shadow = (int32_t)sp->s_shadow;
	ASSERT(oldsp != sp);
	ASSERT(oldshadow != ip->i_number);
	ASSERT(ip->i_number != ip->i_shadow);
	/*
	 * Change the mode bits to follow the acl list
	 *
	 * NOTE:	a directory is not required to have a "regular" acl
	 *		bug id's 1238908,  1257173, 1263171 and 1263188
	 *
	 *		but if a "regular" acl is present, it must contain
	 *		an "owner", "group", and "other" acl
	 *
	 *		If an ACL mask exists, the effective group rights are
	 *		set to the mask.  Otherwise, the effective group rights
	 *		are set to the object group bits.
	 */
	if (sp->aowner) {				/* Owner */
		ip->i_mode &= ~0700;			/* clear Owner */
		ip->i_mode |= (sp->aowner->acl_ic_perm & 07) << 6;
		ip->i_uid = sp->aowner->acl_ic_who;
	}

	if (sp->agroup) {				/* Group */
		ip->i_mode &= ~0070;			/* clear Group */
		ip->i_mode |= MASK2MODE(sp);		/* apply mask */
		ip->i_gid = sp->agroup->acl_ic_who;
	}

	if (sp->aother) {				/* Other */
		ip->i_mode &= ~0007;			/* clear Other */
		ip->i_mode |= (sp->aother->acl_ic_perm & 07);
	}

	if (sp->aclass.acl_ismask)
		ip->i_mode = (ip->i_mode & ~070) |
		    (((sp->aclass.acl_maskbits & 07) << 3) &
		    ip->i_mode);

	TRANS_INODE(ufsvfsp, ip);
	rw_exit(&sp->s_lock);
	ip->i_flag |= ICHG;
	ip->i_seq++;
	/*
	 * when creating a file there is no need to push the inode, it
	 * is pushed later
	 */
	if (puship == 1)
		ufs_iupdat(ip, 1);

	/*
	 * Decrement link count on the old shadow inode,
	 * and decrement reference count on the old aclp,
	 */
	if (oldshadow) {
		/* Get the shadow inode */
		ASSERT(RW_WRITE_HELD(&ip->i_contents));
		vfsp = ITOV(ip)->v_vfsp;
		if ((err = ufs_iget_alloced(vfsp, oldshadow, &sip, cr)) != 0) {
			return (EIO);
		}
		/* Decrement link count */
		rw_enter(&sip->i_contents, RW_WRITER);
		if (oldsp)
			rw_enter(&oldsp->s_lock, RW_WRITER);
		ASSERT(sip->i_dquot == 0);
		ASSERT(sip->i_nlink > 0);
		usecnt = --sip->i_nlink;
		ufs_setreclaim(sip);
		TRANS_INODE(ufsvfsp, sip);
		sip->i_flag |= ICHG | IMOD;
		sip->i_seq++;
		ITIMES_NOLOCK(sip);
		if (oldsp) {
			oldsp->s_use = usecnt;
			refcnt = --oldsp->s_ref;
			signature = oldsp->s_signature;
			/*
			 * Always release s_lock before both releasing
			 * i_contents and calling VN_RELE.
			 */
			rw_exit(&oldsp->s_lock);
		}
		rw_exit(&sip->i_contents);
		VN_RELE(ITOV(sip));
		if (oldsp && (refcnt == 0))
			si_cache_del(oldsp, signature);
	}
	return (0);

errout:
	/* Throw the newly alloc'd inode away */
	sip->i_nlink = 0;
	ufs_setreclaim(sip);
	TRANS_INODE(ufsvfsp, sip);
	ITIMES_NOLOCK(sip);
	rw_exit(&sip->i_contents);
	VN_RELE(ITOV(sip));
	ASSERT(!sp->s_use && !sp->s_ref && !(sp->s_flags & SI_CACHED));
	(void) ufs_si_free_mem(sp);
	return (err);
}

/*
 * Load the acls for inode ip either from disk (adding to the cache),
 * or search the cache and attach the cache'd acl list to the ip.
 * In either case, maintain the proper reference count on the cached entry.
 *
 * Parameters:
 *	ip - Ptr to the inode which needs the acl list loaded
 *	cr - Ptr to credentials
 *
 * Returns:	0 - Success
 *		N - From errno.h
 */
int
ufs_si_load(struct inode *ip, cred_t *cr)
{
	struct vfs *vfsp;
	struct inode *sip;
	ufs_fsd_t *fsdp;
	si_t *sp;
	vsecattr_t vsecattr = { 0, 0, NULL, 0, NULL };
	aclent_t *aclp;
	ufs_acl_t *ufsaclp;
	caddr_t acldata = NULL;
	ino_t maxino;
	int err;
	size_t acldatalen;
	int numacls;
	int shadow;
	int usecnt;
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct fs *fs = ufsvfsp->vfs_fs;

	ASSERT(ip != NULL);
	ASSERT(RW_WRITE_HELD(&ip->i_contents));
	ASSERT(ip->i_shadow && ip->i_ufs_acl == NULL);
	ASSERT((ip->i_mode & IFMT) != IFSHAD);

	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
		return (ENOSYS);

	if (ip->i_shadow == ip->i_number)
		return (EIO);

	maxino = (ino_t)(ITOF(ip)->fs_ncg * ITOF(ip)->fs_ipg);
	if (ip->i_shadow < UFSROOTINO || ip->i_shadow > maxino)
		return (EIO);

	/*
	 * XXX Check cache.  If in cache, link to it and increment
	 * the reference count, then return.
	 */
	if (si_cachei_get(ip, &sp) == 0) {
		ASSERT(RW_WRITE_HELD(&sp->s_lock));
		ip->i_ufs_acl = sp;
		sp->s_ref++;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		rw_exit(&sp->s_lock);
		si_cachehit++;
		return (0);
	}

	/* Get the shadow inode */
	vfsp = ITOV(ip)->v_vfsp;
	shadow = ip->i_shadow;
	if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) != 0) {
		return (err);
	}
	rw_enter(&sip->i_contents, RW_WRITER);

	if ((sip->i_mode & IFMT) != IFSHAD) {
		rw_exit(&sip->i_contents);
		err = EINVAL;
		goto alldone;
	}

	ASSERT(sip->i_dquot == 0);
	usecnt = sip->i_nlink;
	if ((!ULOCKFS_IS_NOIACC(&ufsvfsp->vfs_ulockfs)) &&
	    (!(sip)->i_ufsvfs->vfs_noatime)) {
		sip->i_flag |= IACC;
	}
	rw_downgrade(&sip->i_contents);

	ASSERT(sip->i_size <= MAXOFF_T);
	/* Read the acl's and other stuff from disk */
	acldata = kmem_zalloc((size_t)sip->i_size, KM_SLEEP);
	acldatalen = sip->i_size;

	err = ufs_rdwri(UIO_READ, FREAD, sip, acldata, acldatalen, (offset_t)0,
	    UIO_SYSSPACE, (int *)0, cr);

	rw_exit(&sip->i_contents);

	if (err)
		goto alldone;

	/*
	 * Convert from disk format
	 * Result is a vsecattr struct which we then convert to the
	 * si struct.
	 */
	bzero((caddr_t)&vsecattr, sizeof (vsecattr_t));
	for (fsdp = (ufs_fsd_t *)acldata;
	    fsdp < (ufs_fsd_t *)(acldata + acldatalen);
	    fsdp = (ufs_fsd_t *)((caddr_t)fsdp +
	    FSD_RECSZ(fsdp, fsdp->fsd_size))) {
		if (fsdp->fsd_size <= 0)
			break;
		switch (fsdp->fsd_type) {
		case FSD_ACL:
			numacls = vsecattr.vsa_aclcnt =
			    (int)((fsdp->fsd_size - 2 * sizeof (int)) /
			    sizeof (ufs_acl_t));
			aclp = vsecattr.vsa_aclentp =
			    kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
			for (ufsaclp = (ufs_acl_t *)fsdp->fsd_data;
			    numacls; ufsaclp++) {
				aclp->a_type = ufsaclp->acl_tag;
				aclp->a_id = ufsaclp->acl_who;
				aclp->a_perm = ufsaclp->acl_perm;
				aclp++;
				numacls--;
			}
			break;
		case FSD_DFACL:
			numacls = vsecattr.vsa_dfaclcnt =
			    (int)((fsdp->fsd_size - 2 * sizeof (int)) /
			    sizeof (ufs_acl_t));
			aclp = vsecattr.vsa_dfaclentp =
			    kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
			for (ufsaclp = (ufs_acl_t *)fsdp->fsd_data;
			    numacls; ufsaclp++) {
				aclp->a_type = ufsaclp->acl_tag;
				aclp->a_id = ufsaclp->acl_who;
				aclp->a_perm = ufsaclp->acl_perm;
				aclp++;
				numacls--;
			}
			break;
		}
	}
	/* Sort the lists */
	if (vsecattr.vsa_aclentp) {
		ksort((caddr_t)vsecattr.vsa_aclentp, vsecattr.vsa_aclcnt,
		    sizeof (aclent_t), cmp2acls);
		if ((err = acl_validate(vsecattr.vsa_aclentp,
		    vsecattr.vsa_aclcnt, ACL_CHECK)) != 0) {
			goto alldone;
		}
	}
	if (vsecattr.vsa_dfaclentp) {
		ksort((caddr_t)vsecattr.vsa_dfaclentp, vsecattr.vsa_dfaclcnt,
		    sizeof (aclent_t), cmp2acls);
		if ((err = acl_validate(vsecattr.vsa_dfaclentp,
		    vsecattr.vsa_dfaclcnt, DEF_ACL_CHECK)) != 0) {
			goto alldone;
		}
	}

	/* ignore shadow inodes without ACLs */
	if (!vsecattr.vsa_aclentp && !vsecattr.vsa_dfaclentp) {
		err = 0;
		goto alldone;
	}

	/* Convert from vsecattr struct to ufs_acl_entry struct */
	if ((err = vsecattr2aclentry(&vsecattr, &sp)) != 0) {
		goto alldone;
	}

	/* These aren't filled in by vsecattr2aclentry */
	sp->s_shadow = ip->i_shadow;
	sp->s_dev = ip->i_dev;
	sp->s_use = usecnt;
	sp->s_ref = 1;
	ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);

	/* XXX Might make a duplicate */
	si_cache_put(sp);

	/* Signal anyone waiting on this shadow to be loaded */
	ip->i_ufs_acl = sp;
	err = 0;
	si_cachemiss++;
	if ((acldatalen + fs->fs_bsize) > ufsvfsp->vfs_maxacl)
		ufsvfsp->vfs_maxacl = acldatalen + fs->fs_bsize;
alldone:
	/*
	 * Common exit point.  Mark shadow inode as ISTALE
	 * if we detect an internal inconsistency, to
	 * prevent stray inodes appearing in the cache.
	 */
	if (err) {
		rw_enter(&sip->i_contents, RW_READER);
		mutex_enter(&sip->i_tlock);
		sip->i_flag |= ISTALE;
		mutex_exit(&sip->i_tlock);
		rw_exit(&sip->i_contents);
	}
	VN_RELE(ITOV(sip));

	/*
	 * Cleanup of data structures allocated
	 * on the fly.
	 */
	if (acldata)
		kmem_free(acldata, acldatalen);

	if (vsecattr.vsa_aclentp)
		kmem_free(vsecattr.vsa_aclentp,
		    vsecattr.vsa_aclcnt * sizeof (aclent_t));
	if (vsecattr.vsa_dfaclentp)
		kmem_free(vsecattr.vsa_dfaclentp,
		    vsecattr.vsa_dfaclcnt * sizeof (aclent_t));
	return (err);
}

/*
 * Check the inode's ACL's to see if this mode of access is
 * allowed; return 0 if allowed, EACCES if not.
 *
 * We follow the procedure defined in Sec. 3.3.5, ACL Access
 * Check Algorithm, of the POSIX 1003.6 Draft Standard.
 *
 *	ip	parent inode
 *	mode	mode of access read, write, execute/examine
 *	cr	credentials
 */
int
ufs_acl_access(struct inode *ip, int mode, cred_t *cr)
{
	ufs_ic_acl_t *acl;
	int ismask, mask = 0;
	int gperm = 0;
	int ngroup = 0;
	si_t *sp = NULL;
	uid_t uid = crgetuid(cr);
	uid_t owner;

	ASSERT(ip->i_ufs_acl != NULL);
	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	sp = ip->i_ufs_acl;

	ismask = sp->aclass.acl_ismask ?
	    sp->aclass.acl_ismask : 0;

	if (ismask)
		mask = sp->aclass.acl_maskbits;
	else
		mask = -1;

	/*
	 * (1) If user owns the file, obey user mode bits
	 */
	owner = sp->aowner->acl_ic_who;
	if (uid == owner) {
		return (MODE_CHECK(owner, mode, (sp->aowner->acl_ic_perm << 6),
		    cr, ip));
	}

	/*
	 * (2) Obey any matching ACL_USER entry
	 */
	if (sp->ausers)
		for (acl = sp->ausers; acl != NULL; acl = acl->acl_ic_next) {
			if (acl->acl_ic_who == uid) {
				return (MODE_CHECK(owner, mode,
				    (mask & acl->acl_ic_perm) << 6, cr, ip));
			}
		}

	/*
	 * (3) If user belongs to file's group, obey group mode bits
	 * if no ACL mask is defined; if there is an ACL mask, we look
	 * at both the group mode bits and any ACL_GROUP entries.
	 */
	if (groupmember((uid_t)sp->agroup->acl_ic_who, cr)) {
		ngroup++;
		gperm = (sp->agroup->acl_ic_perm);
		if (!ismask)
			return (MODE_CHECK(owner, mode, (gperm << 6), cr, ip));
	}

	/*
	 * (4) Accumulate the permissions in matching ACL_GROUP entries
	 */
	if (sp->agroups) {
		for (acl = sp->agroups; acl != NULL; acl = acl->acl_ic_next) {
			if (groupmember(acl->acl_ic_who, cr)) {
				ngroup++;
				gperm |= acl->acl_ic_perm;
			}
		}
	}

	if (ngroup != 0)
		return (MODE_CHECK(owner, mode, ((gperm & mask) << 6), cr, ip));

	/*
	 * (5) Finally, use the "other" mode bits
	 */
	return (MODE_CHECK(owner, mode, sp->aother->acl_ic_perm << 6, cr, ip));
}

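/*
 * Worked example of the mask in steps (3) and (4) above: with group
 * mode bits rw-, an ACL_GROUP entry of rwx, and a mask of r--, a caller
 * who is a member of both groups accumulates gperm = rwx but is checked
 * against only (gperm & mask) = r--.  The mask, not the individual
 * group entries, is the final arbiter of group-class rights.
 */
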
/*ARGSUSED2*/
int
ufs_acl_get(struct inode *ip, vsecattr_t *vsap, int flag, cred_t *cr)
{
	aclent_t *aclentp;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	/* XXX Range check, sanity check, shadow check */
	/* If an ACL is present, get the data from the shadow inode info */
	if (ip->i_ufs_acl)
		return (aclentry2vsecattr(ip->i_ufs_acl, vsap));

	/*
	 * If no ACLs are present, fabricate one from the mode bits.
	 * This code is almost identical to fs_fab_acl(), but we
	 * already have the mode bits handy, so we'll avoid going
	 * through fop_getattr() again.
	 */

	vsap->vsa_aclcnt = 0;
	vsap->vsa_aclentp = NULL;
	vsap->vsa_dfaclcnt = 0;		/* Default ACLs are not fabricated */
	vsap->vsa_dfaclentp = NULL;

	if (vsap->vsa_mask & (VSA_ACLCNT | VSA_ACL))
		vsap->vsa_aclcnt = 4;	/* USER, GROUP, OTHER, and CLASS */

	if (vsap->vsa_mask & VSA_ACL) {
		vsap->vsa_aclentp = kmem_zalloc(4 * sizeof (aclent_t),
		    KM_SLEEP);
		if (vsap->vsa_aclentp == NULL)
			return (ENOMEM);
		aclentp = vsap->vsa_aclentp;

		/* Owner */
		aclentp->a_type = USER_OBJ;
		aclentp->a_perm = ((ushort_t)(ip->i_mode & 0700)) >> 6;
		aclentp->a_id = ip->i_uid;	/* Really undefined */
		aclentp++;

		/* Group */
		aclentp->a_type = GROUP_OBJ;
		aclentp->a_perm = ((ushort_t)(ip->i_mode & 0070)) >> 3;
		aclentp->a_id = ip->i_gid;	/* Really undefined */
		aclentp++;

		/* Other */
		aclentp->a_type = OTHER_OBJ;
		aclentp->a_perm = ip->i_mode & 0007;
		aclentp->a_id = 0;		/* Really undefined */
		aclentp++;

		/* Class */
		aclentp->a_type = CLASS_OBJ;
		aclentp->a_perm = ((ushort_t)(ip->i_mode & 0070)) >> 3;
		aclentp->a_id = 0;		/* Really undefined */
		ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
		    sizeof (aclent_t), cmp2acls);
	}

	return (0);
}

/*ARGSUSED2*/
int
ufs_acl_set(struct inode *ip, vsecattr_t *vsap, int flag, cred_t *cr)
{
	si_t *sp;
	int err;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
		return (ENOSYS);

	/*
	 * only the owner of the file or privileged users can change the ACLs
	 */
	if (secpolicy_vnode_setdac(cr, ip->i_uid) != 0)
		return (EPERM);

	/* Convert from vsecattr struct to ufs_acl_entry struct */
	if ((err = vsecattr2aclentry(vsap, &sp)) != 0)
		return (err);
	sp->s_dev = ip->i_dev;

	/*
	 * Make the user & group objs in the acl list follow what's
	 * in the inode.
	 */
#ifdef DEBUG
	if (vsap->vsa_mask == VSA_ACL) {
		ASSERT(sp->aowner);
		ASSERT(sp->agroup);
		ASSERT(sp->aother);
	}
#endif	/* DEBUG */

	if (sp->aowner)
		sp->aowner->acl_ic_who = ip->i_uid;
	if (sp->agroup)
		sp->agroup->acl_ic_who = ip->i_gid;

	/*
	 * Write and cache the new acl list
	 */
	err = ufs_si_store(ip, sp, 1, cr);

	return (err);
}

/*
 * XXX Scan sorted array of acl's, checking for:
 * 1) Any duplicate/conflicting entries (same type and id)
 * 2) More than 1 of USER_OBJ, GROUP_OBJ, OTHER_OBJ, CLASS_OBJ
 * 3) More than 1 of DEF_USER_OBJ, DEF_GROUP_OBJ, DEF_OTHER_OBJ, DEF_CLASS_OBJ
 *
 * Parameters:
 *	aclentp - ptr to sorted list of acl entries.
 *	nentries - # acl entries on the list
 *	flag - Bitmap (ACL_CHECK and/or DEF_ACL_CHECK) indicating whether the
 *	list contains regular acls, default acls, or both.
 *
 * Returns:	0 - Success
 *		EINVAL - Invalid list (dups or multiple entries of type
 *			 USER_OBJ, etc)
 */
static int
acl_validate(aclent_t *aclentp, int nentries, int flag)
{
	int i;
	int nuser_objs = 0;
	int ngroup_objs = 0;
	int nother_objs = 0;
	int nclass_objs = 0;
	int ndef_user_objs = 0;
	int ndef_group_objs = 0;
	int ndef_other_objs = 0;
	int ndef_class_objs = 0;
	int nusers = 0;
	int ngroups = 0;
	int ndef_users = 0;
	int ndef_groups = 0;
	int numdefs = 0;

	/* Null list or list of one */
	if (aclentp == NULL)
		return (0);

	if (nentries <= 0)
		return (EINVAL);

	for (i = 1; i < nentries; i++) {
		if (((aclentp[i - 1].a_type == aclentp[i].a_type) &&
		    (aclentp[i - 1].a_id == aclentp[i].a_id)) ||
		    (aclentp[i - 1].a_perm > 07)) {
			return (EINVAL);
		}
	}

	if (flag == 0 || (flag != ACL_CHECK && flag != DEF_ACL_CHECK))
		return (EINVAL);

	/* Count types */
	for (i = 0; i < nentries; i++) {
		switch (aclentp[i].a_type) {
		case USER_OBJ:		/* Owner */
			nuser_objs++;
			break;
		case GROUP_OBJ:		/* Group */
			ngroup_objs++;
			break;
		case OTHER_OBJ:		/* Other */
			nother_objs++;
			break;
		case CLASS_OBJ:		/* Mask */
			nclass_objs++;
			break;
		case DEF_USER_OBJ:	/* Default Owner */
			ndef_user_objs++;
			break;
		case DEF_GROUP_OBJ:	/* Default Group */
			ndef_group_objs++;
			break;
		case DEF_OTHER_OBJ:	/* Default Other */
			ndef_other_objs++;
			break;
		case DEF_CLASS_OBJ:	/* Default Mask */
			ndef_class_objs++;
			break;
		case USER:		/* Users */
			nusers++;
			break;
		case GROUP:		/* Groups */
			ngroups++;
			break;
		case DEF_USER:		/* Default Users */
			ndef_users++;
			break;
		case DEF_GROUP:		/* Default Groups */
			ndef_groups++;
			break;
		default:		/* Unknown type */
			return (EINVAL);
		}
	}

	/*
	 * For normal acl's, we require there be one (and only one)
	 * USER_OBJ, GROUP_OBJ and OTHER_OBJ.  There is either zero
	 * or one CLASS_OBJ.
	 */
	if (flag & ACL_CHECK) {
		if (nuser_objs != 1 || ngroup_objs != 1 ||
		    nother_objs != 1 || nclass_objs > 1) {
			return (EINVAL);
		}
		/*
		 * If there are ANY group acls, there MUST be a
		 * class_obj(mask) acl (1003.6/D12 p. 29 lines 75-80).
		 */
		if (ngroups && !nclass_objs) {
			return (EINVAL);
		}
		if (nuser_objs + ngroup_objs + nother_objs + nclass_objs +
		    ngroups + nusers > MAX_ACL_ENTRIES)
			return (EINVAL);
	}

	/*
	 * For default acl's, we require that there be either one (and only
	 * one) DEF_USER_OBJ, DEF_GROUP_OBJ and DEF_OTHER_OBJ
	 * or there be none of them.
	 */
	if (flag & DEF_ACL_CHECK) {
		if (ndef_other_objs > 1 || ndef_user_objs > 1 ||
		    ndef_group_objs > 1 || ndef_class_objs > 1) {
			return (EINVAL);
		}

		numdefs = ndef_other_objs + ndef_user_objs + ndef_group_objs;

		if (numdefs != 0 && numdefs != 3) {
			return (EINVAL);
		}
		/*
		 * If there are ANY def_group acls, there MUST be a
		 * def_class_obj(mask) acl (1003.6/D12 P. 29 lines 75-80).
		 * XXX(jimh) This is inferred.
		 */
		if (ndef_groups && !ndef_class_objs) {
			return (EINVAL);
		}
		if ((ndef_users || ndef_groups) &&
		    ((numdefs != 3) && !ndef_class_objs)) {
			return (EINVAL);
		}
		if (ndef_user_objs + ndef_group_objs + ndef_other_objs +
		    ndef_class_objs + ndef_users + ndef_groups >
		    MAX_ACL_ENTRIES)
			return (EINVAL);
	}
	return (0);
}

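/*
 * For illustration: the smallest valid "regular" list contains exactly
 * one USER_OBJ, one GROUP_OBJ and one OTHER_OBJ entry.  Adding even a
 * single GROUP (or, per the inferred rule above, DEF_GROUP) entry then
 * requires a CLASS_OBJ (mask) entry as well, so a non-trivial group
 * ACL is always at least five entries long.
 */
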
static int
formacl(ufs_ic_acl_t **aclpp, aclent_t *aclentp)
{
	ufs_ic_acl_t *uaclp;

	uaclp = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
	uaclp->acl_ic_perm = aclentp->a_perm;
	uaclp->acl_ic_who = aclentp->a_id;
	uaclp->acl_ic_next = *aclpp;
	*aclpp = uaclp;
	return (0);
}

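/*
 * Note that formacl() pushes the new entry onto the head of the list.
 * vsecattr2aclentry() below compensates by walking its input array from
 * the last entry back to the first, so the in-core lists come out in
 * the original (sorted) order.
 */
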
/*
 * XXX - Make more efficient
 * Convert from the vsecattr struct, used by the VOP interface, to
 * the ufs_acl_entry struct used for in-core storage of acl's.
 *
 * Parameters:
 *	vsap - Ptr to array of security attributes.
 *	spp - Ptr to ptr to si struct for the results
 *
 * Returns:	0 - Success
 *		N - From errno.h
 */
static int
vsecattr2aclentry(vsecattr_t *vsap, si_t **spp)
{
	aclent_t *aclentp, *aclp;
	si_t *sp;
	int err;
	int i;

	/* Sort & validate the lists on the vsap */
	ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
	    sizeof (aclent_t), cmp2acls);
	ksort((caddr_t)vsap->vsa_dfaclentp, vsap->vsa_dfaclcnt,
	    sizeof (aclent_t), cmp2acls);
	if ((err = acl_validate(vsap->vsa_aclentp,
	    vsap->vsa_aclcnt, ACL_CHECK)) != 0)
		return (err);
	if ((err = acl_validate(vsap->vsa_dfaclentp,
	    vsap->vsa_dfaclcnt, DEF_ACL_CHECK)) != 0)
		return (err);

	/* Create new si struct and hang acl's off it */
	sp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
	rw_init(&sp->s_lock, NULL, RW_DEFAULT, NULL);

	/* Process acl list */
	aclp = (aclent_t *)vsap->vsa_aclentp;
	aclentp = aclp + vsap->vsa_aclcnt - 1;
	for (i = 0; i < vsap->vsa_aclcnt; i++) {
		switch (aclentp->a_type) {
		case USER_OBJ:		/* Owner */
			if (err = formacl(&sp->aowner, aclentp))
				goto error;
			break;
		case GROUP_OBJ:		/* Group */
			if (err = formacl(&sp->agroup, aclentp))
				goto error;
			break;
		case OTHER_OBJ:		/* Other */
			if (err = formacl(&sp->aother, aclentp))
				goto error;
			break;
		case USER:
			if (err = formacl(&sp->ausers, aclentp))
				goto error;
			break;
		case CLASS_OBJ:		/* Mask */
			sp->aclass.acl_ismask = 1;
			sp->aclass.acl_maskbits = aclentp->a_perm;
			break;
		case GROUP:
			if (err = formacl(&sp->agroups, aclentp))
				goto error;
			break;
		default:
			break;
		}
		aclentp--;
	}

	/* Process default acl list */
	aclp = (aclent_t *)vsap->vsa_dfaclentp;
	aclentp = aclp + vsap->vsa_dfaclcnt - 1;
	for (i = 0; i < vsap->vsa_dfaclcnt; i++) {
		switch (aclentp->a_type) {
		case DEF_USER_OBJ:	/* Default Owner */
			if (err = formacl(&sp->downer, aclentp))
				goto error;
			break;
		case DEF_GROUP_OBJ:	/* Default Group */
			if (err = formacl(&sp->dgroup, aclentp))
				goto error;
			break;
		case DEF_OTHER_OBJ:	/* Default Other */
			if (err = formacl(&sp->dother, aclentp))
				goto error;
			break;
		case DEF_USER:
			if (err = formacl(&sp->dusers, aclentp))
				goto error;
			break;
		case DEF_CLASS_OBJ:	/* Default Mask */
			sp->dclass.acl_ismask = 1;
			sp->dclass.acl_maskbits = aclentp->a_perm;
			break;
		case DEF_GROUP:
			if (err = formacl(&sp->dgroups, aclentp))
				goto error;
			break;
		default:
			break;
		}
		aclentp--;
	}
	*spp = sp;
	return (0);

error:
	ufs_si_free_mem(sp);
	return (err);
}

void
formvsec(int obj_type, ufs_ic_acl_t *aclp, aclent_t **aclentpp)
{
	for (; aclp; aclp = aclp->acl_ic_next) {
		(*aclentpp)->a_type = obj_type;
		(*aclentpp)->a_perm = aclp->acl_ic_perm;
		(*aclentpp)->a_id = aclp->acl_ic_who;
		(*aclentpp)++;
	}
}

/*
 * XXX - Make more efficient
 * Convert from the ufs_acl_entry struct used for in-core storage of acl's
 * to the vsecattr struct, used by the VOP interface.
 *
 * Parameters:
 *	sp - Ptr to si struct with the acls
 *	vsap - Ptr to a vsecattr struct which will take the results.
 *
 * Returns:	0 - Success
 *		N - From errno table
 */
static int
aclentry2vsecattr(si_t *sp, vsecattr_t *vsap)
{
	aclent_t *aclentp;
	int numacls = 0;
	int err;

	vsap->vsa_aclentp = vsap->vsa_dfaclentp = NULL;

	numacls = acl_count(sp->aowner) +
	    acl_count(sp->agroup) +
	    acl_count(sp->aother) +
	    acl_count(sp->ausers) +
	    acl_count(sp->agroups);
	if (sp->aclass.acl_ismask)
		numacls++;

	if (vsap->vsa_mask & (VSA_ACLCNT | VSA_ACL))
		vsap->vsa_aclcnt = numacls;

	if (numacls == 0)
		goto do_defaults;

	if (vsap->vsa_mask & VSA_ACL) {
		vsap->vsa_aclentp = kmem_zalloc(numacls * sizeof (aclent_t),
		    KM_SLEEP);
		aclentp = vsap->vsa_aclentp;

		formvsec(USER_OBJ, sp->aowner, &aclentp);
		formvsec(USER, sp->ausers, &aclentp);
		formvsec(GROUP_OBJ, sp->agroup, &aclentp);
		formvsec(GROUP, sp->agroups, &aclentp);
		formvsec(OTHER_OBJ, sp->aother, &aclentp);

		if (sp->aclass.acl_ismask) {
			aclentp->a_type = CLASS_OBJ;		/* Mask */
			aclentp->a_perm = sp->aclass.acl_maskbits;
			aclentp->a_id = 0;
			aclentp++;
		}

		/* Sort the acl list */
		ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
		    sizeof (aclent_t), cmp2acls);
		/* Check the acl list */
		if ((err = acl_validate(vsap->vsa_aclentp,
		    vsap->vsa_aclcnt, ACL_CHECK)) != 0) {
			kmem_free(vsap->vsa_aclentp,
			    numacls * sizeof (aclent_t));
			vsap->vsa_aclentp = NULL;
			return (err);
		}
	}

do_defaults:
	/* Process Defaults */

	numacls = acl_count(sp->downer) +
	    acl_count(sp->dgroup) +
	    acl_count(sp->dother) +
	    acl_count(sp->dusers) +
	    acl_count(sp->dgroups);
	if (sp->dclass.acl_ismask)
		numacls++;

	if (vsap->vsa_mask & (VSA_DFACLCNT | VSA_DFACL))
		vsap->vsa_dfaclcnt = numacls;

	if (numacls == 0)
		goto do_others;

	if (vsap->vsa_mask & VSA_DFACL) {
		vsap->vsa_dfaclentp =
		    kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
		aclentp = vsap->vsa_dfaclentp;
		formvsec(DEF_USER_OBJ, sp->downer, &aclentp);
		formvsec(DEF_USER, sp->dusers, &aclentp);
		formvsec(DEF_GROUP_OBJ, sp->dgroup, &aclentp);
		formvsec(DEF_GROUP, sp->dgroups, &aclentp);
		formvsec(DEF_OTHER_OBJ, sp->dother, &aclentp);

		if (sp->dclass.acl_ismask) {
			aclentp->a_type = DEF_CLASS_OBJ;	/* Mask */
			aclentp->a_perm = sp->dclass.acl_maskbits;
			aclentp->a_id = 0;
			aclentp++;
		}

		/* Sort the default acl list */
		ksort((caddr_t)vsap->vsa_dfaclentp, vsap->vsa_dfaclcnt,
		    sizeof (aclent_t), cmp2acls);
		if ((err = acl_validate(vsap->vsa_dfaclentp,
		    vsap->vsa_dfaclcnt, DEF_ACL_CHECK)) != 0) {
			if (vsap->vsa_aclentp != NULL)
				kmem_free(vsap->vsa_aclentp,
				    vsap->vsa_aclcnt * sizeof (aclent_t));
			kmem_free(vsap->vsa_dfaclentp,
			    vsap->vsa_dfaclcnt * sizeof (aclent_t));
			vsap->vsa_aclentp = vsap->vsa_dfaclentp = NULL;
			return (err);
		}
	}

do_others:
	return (0);
}

static void
acl_free(ufs_ic_acl_t *aclp)
{
	while (aclp != NULL) {
		ufs_ic_acl_t *nextaclp = aclp->acl_ic_next;
		kmem_free(aclp, sizeof (ufs_ic_acl_t));
		aclp = nextaclp;
	}
}

/*
 * ufs_si_free_mem will discard the sp, and the acl hanging off of the
 * sp.  It is required that the sp not be locked, and not be in the
 * cache.
 *
 * input: pointer to sp to discard.
 *
 * return - nothing.
 */
static void
ufs_si_free_mem(si_t *sp)
{
	ASSERT(!(sp->s_flags & SI_CACHED));
	ASSERT(!RW_LOCK_HELD(&sp->s_lock));
	/*
	 * remove from the cache
	 * free the acl entries
	 */
	acl_free(sp->aowner);
	acl_free(sp->agroup);
	acl_free(sp->aother);
	acl_free(sp->ausers);
	acl_free(sp->agroups);

	acl_free(sp->downer);
	acl_free(sp->dgroup);
	acl_free(sp->dother);
	acl_free(sp->dusers);
	acl_free(sp->dgroups);

	rw_destroy(&sp->s_lock);
	kmem_free(sp, sizeof (si_t));
}

void
acl_cpy(ufs_ic_acl_t *saclp, ufs_ic_acl_t *daclp)
{
	ufs_ic_acl_t *aclp, *prev_aclp = NULL, *aclp1;

	if (saclp == NULL) {
		daclp = NULL;
		return;
	}
	prev_aclp = daclp;

	for (aclp = saclp; aclp != NULL; aclp = aclp->acl_ic_next) {
		aclp1 = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
		aclp1->acl_ic_next = NULL;
		aclp1->acl_ic_who = aclp->acl_ic_who;
		aclp1->acl_ic_perm = aclp->acl_ic_perm;
		prev_aclp->acl_ic_next = aclp1;
		prev_aclp = (ufs_ic_acl_t *)&aclp1->acl_ic_next;
	}
}

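/*
 * The prev_aclp bookkeeping above relies on acl_ic_next being the first
 * member of ufs_ic_acl_t (see ufs_acl.h): callers pass the address of a
 * list-head field cast to (ufs_ic_acl_t *), so writing through
 * prev_aclp->acl_ic_next stores either into the head pointer itself or
 * into the previous element's next pointer, at the same offset.
 */
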
/*
 * ufs_si_inherit takes a parent acl structure (saclp) and the inode
 * of the object that is inheriting an acl and returns the inode
 * with the acl linked to it.  It also writes the acl to disk if
 * it is a unique inode.
 *
 *	ip - pointer to inode of object inheriting the acl (contents lock)
 *	tdp - parent inode (rw_lock and contents lock)
 *	mode - creation modes
 *	cr - credentials pointer
 */
int
ufs_si_inherit(struct inode *ip, struct inode *tdp, o_mode_t mode, cred_t *cr)
{
	si_t *tsp, *sp = tdp->i_ufs_acl;
	int error;
	o_mode_t old_modes, old_uid, old_gid;
	int mask;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));
	ASSERT(RW_WRITE_HELD(&tdp->i_rwlock));
	ASSERT(RW_WRITE_HELD(&tdp->i_contents));

	/*
	 * if links/symbolic links, or other invalid acl objects are copied
	 * or moved to a directory with a default acl do not allow inheritance
	 * just return.
	 */
	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
		return (0);

	/* lock the parent security information */
	rw_enter(&sp->s_lock, RW_READER);

	ASSERT(((tdp->i_mode & IFMT) == IFDIR) ||
	    ((tdp->i_mode & IFMT) == IFATTRDIR));

	mask = ((sp->downer != NULL) ? 1 : 0) |
	    ((sp->dgroup != NULL) ? 2 : 0) |
	    ((sp->dother != NULL) ? 4 : 0);

	if (mask == 0) {
		rw_exit(&sp->s_lock);
		return (0);
	}

	if (mask != 7) {
		rw_exit(&sp->s_lock);
		return (EINVAL);
	}

	tsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
	rw_init(&tsp->s_lock, NULL, RW_DEFAULT, NULL);

	/* copy the default acls */

	ASSERT(RW_READ_HELD(&sp->s_lock));
	acl_cpy(sp->downer, (ufs_ic_acl_t *)&tsp->aowner);
	acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&tsp->agroup);
	acl_cpy(sp->dother, (ufs_ic_acl_t *)&tsp->aother);
	acl_cpy(sp->dusers, (ufs_ic_acl_t *)&tsp->ausers);
	acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&tsp->agroups);
	tsp->aclass.acl_ismask = sp->dclass.acl_ismask;
	tsp->aclass.acl_maskbits = sp->dclass.acl_maskbits;

	/*
	 * set the owner, group, and other values from the master
	 * inode.
	 */

	MODE2ACL(tsp->aowner, (mode >> 6), ip->i_uid);
	MODE2ACL(tsp->agroup, (mode >> 3), ip->i_gid);
	MODE2ACL(tsp->aother, (mode), 0);

	if (tsp->aclass.acl_ismask) {
		tsp->aclass.acl_maskbits &= mode >> 3;
	}

	/* copy default acl if necessary */

	if (((ip->i_mode & IFMT) == IFDIR) ||
	    ((ip->i_mode & IFMT) == IFATTRDIR)) {
		acl_cpy(sp->downer, (ufs_ic_acl_t *)&tsp->downer);
		acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&tsp->dgroup);
		acl_cpy(sp->dother, (ufs_ic_acl_t *)&tsp->dother);
		acl_cpy(sp->dusers, (ufs_ic_acl_t *)&tsp->dusers);
		acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&tsp->dgroups);
		tsp->dclass.acl_ismask = sp->dclass.acl_ismask;
		tsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;
	}
	/*
	 * save the new 9 mode bits in the inode (ip->ic_smode) for
	 * ufs_getattr.  Be sure the mode can be recovered if the store
	 * fails.
	 */
	old_modes = ip->i_mode;
	old_uid = ip->i_uid;
	old_gid = ip->i_gid;
	/*
	 * store the acl, and get back a new security anchor if
	 * it is a duplicate.
	 */
	rw_exit(&sp->s_lock);
	rw_enter(&ip->i_rwlock, RW_WRITER);

	/*
	 * Suppress out of inodes messages if instructed in the
	 * tdp inode.
	 */
	ip->i_flag |= tdp->i_flag & IQUIET;

	if ((error = ufs_si_store(ip, tsp, 0, cr)) != 0) {
		ip->i_mode = old_modes;
		ip->i_uid = old_uid;
		ip->i_gid = old_gid;
	}
	ip->i_flag &= ~IQUIET;
	rw_exit(&ip->i_rwlock);
	return (error);
}

si_t *
ufs_acl_cp(si_t *sp)
{
	si_t *dsp;

	ASSERT(RW_READ_HELD(&sp->s_lock));
	ASSERT(sp->s_ref && sp->s_use);

	dsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
	rw_init(&dsp->s_lock, NULL, RW_DEFAULT, NULL);

	acl_cpy(sp->aowner, (ufs_ic_acl_t *)&dsp->aowner);
	acl_cpy(sp->agroup, (ufs_ic_acl_t *)&dsp->agroup);
	acl_cpy(sp->aother, (ufs_ic_acl_t *)&dsp->aother);
	acl_cpy(sp->ausers, (ufs_ic_acl_t *)&dsp->ausers);
	acl_cpy(sp->agroups, (ufs_ic_acl_t *)&dsp->agroups);

	dsp->aclass.acl_ismask = sp->aclass.acl_ismask;
	dsp->aclass.acl_maskbits = sp->aclass.acl_maskbits;

	acl_cpy(sp->downer, (ufs_ic_acl_t *)&dsp->downer);
	acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&dsp->dgroup);
	acl_cpy(sp->dother, (ufs_ic_acl_t *)&dsp->dother);
	acl_cpy(sp->dusers, (ufs_ic_acl_t *)&dsp->dusers);
	acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&dsp->dgroups);

	dsp->dclass.acl_ismask = sp->dclass.acl_ismask;
	dsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;

	return (dsp);
}

int
ufs_acl_setattr(struct inode *ip, struct vattr *vap, cred_t *cr)
{
	si_t *sp;
	int mask = vap->va_mask;
	int error = 0;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	if (!(mask & (AT_MODE|AT_UID|AT_GID)))
		return (0);

	/*
	 * if no regular acl's, nothing to do, so let's get out
	 */
	if (!(ip->i_ufs_acl) || !(ip->i_ufs_acl->aowner))
		return (0);

	rw_enter(&ip->i_ufs_acl->s_lock, RW_READER);
	sp = ufs_acl_cp(ip->i_ufs_acl);
	ASSERT(sp != ip->i_ufs_acl);

	/*
	 * set the mask to the group permissions if a mask entry
	 * exists.  Otherwise, set the group obj bits to the group
	 * permissions.  Since non-trivial ACLs always have a mask,
	 * and the mask is the final arbiter of group permissions,
	 * setting the mask has the effect of changing the effective
	 * group permissions, even if the group_obj permissions in
	 * the ACL aren't changed.  Posix P1003.1e states that when
	 * an ACL mask exists, chmod(2) must set the acl mask (NOT the
	 * group_obj permissions) to the requested group permissions.
	 */
	if (mask & AT_MODE) {
		sp->aowner->acl_ic_perm = (o_mode_t)(ip->i_mode & 0700) >> 6;
		if (sp->aclass.acl_ismask)
			sp->aclass.acl_maskbits =
			    (o_mode_t)(ip->i_mode & 070) >> 3;
		else
			sp->agroup->acl_ic_perm =
			    (o_mode_t)(ip->i_mode & 070) >> 3;
		sp->aother->acl_ic_perm = (o_mode_t)(ip->i_mode & 07);
	}

	if (mask & AT_UID) {
		/* Caller has verified our privileges */
		sp->aowner->acl_ic_who = ip->i_uid;
	}

	if (mask & AT_GID) {
		sp->agroup->acl_ic_who = ip->i_gid;
	}

	rw_exit(&ip->i_ufs_acl->s_lock);
	error = ufs_si_store(ip, sp, 0, cr);
	return (error);
}

static int
acl_count(ufs_ic_acl_t *p)
{
	ufs_ic_acl_t *acl;
	int count;

	for (count = 0, acl = p; acl; acl = acl->acl_ic_next, count++)
		;
	return (count);
}

/*
 * Takes as input a security structure and generates a buffer
 * with fsd's in a form which can be written to the shadow inode.
 */
static int
ufs_sectobuf(si_t *sp, caddr_t *buf, size_t *len)
{
	size_t acl_size;
	size_t def_acl_size;
	caddr_t buffer;
	struct ufs_fsd *fsdp;
	ufs_acl_t *bufaclp;

	/*
	 * Calc size of buffer to hold all the acls
	 */
	acl_size = acl_count(sp->aowner) +	/* owner */
	    acl_count(sp->agroup) +		/* owner group */
	    acl_count(sp->aother) +		/* owner other */
	    acl_count(sp->ausers) +		/* acl list */
	    acl_count(sp->agroups);		/* group acls */
	if (sp->aclass.acl_ismask)
		acl_size++;

	/* Convert to bytes */
	acl_size *= sizeof (ufs_acl_t);

	/* Add fsd header */
	if (acl_size)
		acl_size += 2 * sizeof (int);

	/*
	 * Calc size of buffer to hold all the default acls
	 */
	def_acl_size =
	    acl_count(sp->downer) +		/* def owner */
	    acl_count(sp->dgroup) +		/* def owner group */
	    acl_count(sp->dother) +		/* def owner other */
	    acl_count(sp->dusers) +		/* def users */
	    acl_count(sp->dgroups);		/* def group acls */
	if (sp->dclass.acl_ismask)
		def_acl_size++;

	/*
	 * Convert to bytes
	 */
	def_acl_size *= sizeof (ufs_acl_t);

	/*
	 * Add fsd header
	 */
	if (def_acl_size)
		def_acl_size += 2 * sizeof (int);

	if (acl_size + def_acl_size == 0)
		return (0);

	buffer = kmem_zalloc((acl_size + def_acl_size), KM_SLEEP);
	bufaclp = (ufs_acl_t *)buffer;

	if (acl_size == 0)
		goto wrtdefs;

	/* create fsd and copy acls */
	fsdp = (struct ufs_fsd *)bufaclp;
	fsdp->fsd_type = FSD_ACL;
	bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];

	ACL_MOVE(sp->aowner, USER_OBJ, bufaclp);
	ACL_MOVE(sp->agroup, GROUP_OBJ, bufaclp);
	ACL_MOVE(sp->aother, OTHER_OBJ, bufaclp);
	ACL_MOVE(sp->ausers, USER, bufaclp);
	ACL_MOVE(sp->agroups, GROUP, bufaclp);

	if (sp->aclass.acl_ismask) {
		bufaclp->acl_tag = CLASS_OBJ;
		bufaclp->acl_who = (uid_t)sp->aclass.acl_ismask;
		bufaclp->acl_perm = (o_mode_t)sp->aclass.acl_maskbits;
		bufaclp++;
	}
	ASSERT(acl_size <= INT_MAX);
	fsdp->fsd_size = (int)acl_size;

wrtdefs:
	if (def_acl_size == 0)
		goto alldone;

	/* if defaults exist then create fsd and copy default acls */
	fsdp = (struct ufs_fsd *)bufaclp;
	fsdp->fsd_type = FSD_DFACL;
	bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];

	ACL_MOVE(sp->downer, DEF_USER_OBJ, bufaclp);
	ACL_MOVE(sp->dgroup, DEF_GROUP_OBJ, bufaclp);
	ACL_MOVE(sp->dother, DEF_OTHER_OBJ, bufaclp);
	ACL_MOVE(sp->dusers, DEF_USER, bufaclp);
	ACL_MOVE(sp->dgroups, DEF_GROUP, bufaclp);
	if (sp->dclass.acl_ismask) {
		bufaclp->acl_tag = DEF_CLASS_OBJ;
		bufaclp->acl_who = (uid_t)sp->dclass.acl_ismask;
		bufaclp->acl_perm = (o_mode_t)sp->dclass.acl_maskbits;
		bufaclp++;
	}
	ASSERT(def_acl_size <= INT_MAX);
	fsdp->fsd_size = (int)def_acl_size;

alldone:
	*buf = buffer;
	*len = acl_size + def_acl_size;

	return (0);
}

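/*
 * The resulting buffer is laid out as at most two records, each a
 * ufs_fsd header followed by an array of ufs_acl_t entries:
 *
 *	[ fsd_type=FSD_ACL | fsd_size | acl entries ... ]
 *	[ fsd_type=FSD_DFACL | fsd_size | default acl entries ... ]
 *
 * The "2 * sizeof (int)" added to each record size above accounts for
 * the fsd_type and fsd_size header words; ufs_si_load() walks the same
 * records with FSD_RECSZ() when reading the shadow inode back in.
 */
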
/*
 * free a shadow inode on disk and in memory
 */
int
ufs_si_free(si_t *sp, struct vfs *vfsp, cred_t *cr)
{
	struct inode *sip;
	int shadow;
	int err = 0;
	int refcnt;
	int signature;

	ASSERT(vfsp);
	ASSERT(sp);

	rw_enter(&sp->s_lock, RW_READER);
	ASSERT(sp->s_shadow <= INT_MAX);
	shadow = (int)sp->s_shadow;
	ASSERT(sp->s_ref);
	rw_exit(&sp->s_lock);

	/*
	 * Decrement link count on the shadow inode,
	 * and decrement reference count on the sip.
	 */
	if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) == 0) {
		rw_enter(&sip->i_contents, RW_WRITER);
		rw_enter(&sp->s_lock, RW_WRITER);
		ASSERT(sp->s_shadow == shadow);
		ASSERT(sip->i_dquot == 0);
		/* Decrement link count */
		ASSERT(sip->i_nlink > 0);
		/*
		 * bug #1264710 assertion failure below
		 */
		sp->s_use = --sip->i_nlink;
		ufs_setreclaim(sip);
		TRANS_INODE(sip->i_ufsvfs, sip);
		sip->i_flag |= ICHG | IMOD;
		sip->i_seq++;
		ITIMES_NOLOCK(sip);
		/* Dec ref counts on si referenced by this ip */
		refcnt = --sp->s_ref;
		signature = sp->s_signature;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		/*
		 * Release s_lock before calling VN_RELE
		 * (which may want to acquire i_contents).
		 */
		rw_exit(&sp->s_lock);
		rw_exit(&sip->i_contents);
		VN_RELE(ITOV(sip));
	} else {
		rw_enter(&sp->s_lock, RW_WRITER);
		/* Dec ref counts on si referenced by this ip */
		refcnt = --sp->s_ref;
		signature = sp->s_signature;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		rw_exit(&sp->s_lock);
	}

	if (refcnt == 0)
		si_cache_del(sp, signature);
	return (err);
}

/*
 * Search the si cache for an si structure by inode #.
 * Returns a locked si structure.
 *
 * Parameters:
 *	ip - Ptr to an inode on this fs
 *	spp - Ptr to ptr to si struct for the results, if found.
 *
 * Returns:	0 - Success (results in spp)
 *		1 - Failure (spp undefined)
 */
static int
si_cachei_get(struct inode *ip, si_t **spp)
{
	si_t *sp;

	rw_enter(&si_cache_lock, RW_READER);
loop:
	for (sp = si_cachei[SI_HASH(ip->i_shadow)]; sp; sp = sp->s_forw)
		if (sp->s_shadow == ip->i_shadow && sp->s_dev == ip->i_dev)
			break;

	if (sp == NULL) {
		/* Not in cache */
		rw_exit(&si_cache_lock);
		return (1);
	}
	/* Found it */
	rw_enter(&sp->s_lock, RW_WRITER);
alldone:
	rw_exit(&si_cache_lock);
	*spp = sp;
	return (0);
}

/*
 * Search the si cache by si structure (ie duplicate of the one passed in).
 * In order for a match the signatures must be the same and
 * the devices must be the same, the acls must match and
 * link count of the cached shadow must be less than the
 * size of ic_nlink - 1.  MAXLINK - 1 is used to allow the count
 * to be incremented one more time by the caller.
 * Returns a locked si structure.
 *
 * Parameters:
 *	ip - Ptr to an inode on this fs
 *	spi - Ptr to the si struct we're searching the cache for.
 *	spp - Ptr to ptr to si struct for the results, if found.
 *
 * Returns:	0 - Success (results in spp)
 *		1 - Failure (spp undefined)
 */
static int
si_cachea_get(struct inode *ip, si_t *spi, si_t **spp)
{
	si_t *sp;

	spi->s_dev = ip->i_dev;
	spi->s_signature = si_signature(spi);
	rw_enter(&si_cache_lock, RW_READER);
loop:
	for (sp = si_cachea[SI_HASH(spi->s_signature)]; sp; sp = sp->s_next) {
		if (sp->s_signature == spi->s_signature &&
		    sp->s_dev == spi->s_dev &&
		    sp->s_use > 0 &&			/* deleting */
		    sp->s_use <= (MAXLINK - 1) &&	/* Too many links */
		    !si_cmp(sp, spi))
			break;
	}

	if (sp == NULL) {
		/* Cache miss */
		rw_exit(&si_cache_lock);
		return (1);
	}
	/* Found it */
	rw_enter(&sp->s_lock, RW_WRITER);
alldone:
	spi->s_shadow = sp->s_shadow;	/* XXX For debugging */
	rw_exit(&si_cache_lock);
	*spp = sp;
	return (0);
}

/*
 * Place an si structure in the si cache.  May cause duplicates.
 *
 * Parameters:
 *	sp - Ptr to the si struct to add to the cache.
 *
 * Returns: Nothing (void)
 */
static void
si_cache_put(si_t *sp)
{
	si_t **tspp;

	ASSERT(sp->s_fore == NULL);
	rw_enter(&si_cache_lock, RW_WRITER);
	if (!sp->s_signature)
		sp->s_signature = si_signature(sp);
	sp->s_flags |= SI_CACHED;
	sp->s_fore = NULL;

	/* The 'by acl' chains */
	tspp = &si_cachea[SI_HASH(sp->s_signature)];
	sp->s_next = *tspp;
	*tspp = sp;

	/* The 'by inode' chains */
	tspp = &si_cachei[SI_HASH(sp->s_shadow)];
	sp->s_forw = *tspp;
	*tspp = sp;

	rw_exit(&si_cache_lock);
}

/*
 * The sp passed in is a candidate for deletion from the cache.  We acquire
 * the cache lock first, so no cache searches can be done.  Then we search
 * for the acl in the cache, and if we find it we can lock it and check that
 * nobody else attached to it while we were acquiring the locks.  If the acl
 * is in the cache and still has a zero reference count, then we remove it
 * from the cache and deallocate it.  If the reference count is non-zero or
 * it is not found in the cache, then someone else attached to it or has
 * already freed it, so we just return.
 *
 * Parameters:
 *	sp - Ptr to the sp struct which is the candidate for deletion.
 *	signature - the signature for the acl for lookup in the hash table
 *
 * Returns: Nothing (void)
 */
void
si_cache_del(si_t *sp, int signature)
{
	si_t **tspp;
	int hash;
	int foundacl = 0;

	/*
	 * Unlink & free the sp from the other queues, then destroy it.
	 * Search the 'by acl' chain first, then the 'by inode' chain
	 * after the acl is locked.
	 */
	rw_enter(&si_cache_lock, RW_WRITER);
	hash = SI_HASH(signature);
	for (tspp = &si_cachea[hash]; *tspp; tspp = &(*tspp)->s_next) {
		if (*tspp == sp) {
			/*
			 * Wait to grab the acl lock until after the acl has
			 * been found in the cache.  Otherwise it might try to
			 * grab a lock that has already been destroyed, or
			 * delete an acl that has already been freed.
			 */
			rw_enter(&sp->s_lock, RW_WRITER);
			/* See if someone else attached to it */
			if (sp->s_ref) {
				rw_exit(&sp->s_lock);
				rw_exit(&si_cache_lock);
				return;
			}
			ASSERT(sp->s_fore == NULL);
			ASSERT(sp->s_flags & SI_CACHED);
			foundacl = 1;
			*tspp = sp->s_next;
			break;
		}
	}

	/*
	 * If the acl was not in the cache, we assume another thread has
	 * deleted it already.  This could happen if another thread attaches to
	 * the acl and then releases it after this thread has already found the
	 * reference count to be zero but has not yet taken the cache lock.
	 * Both threads end up seeing a reference count of zero, and call into
	 * si_cache_del.  See bug 4244827 for details on the race condition.
	 */
	if (foundacl == 0) {
		rw_exit(&si_cache_lock);
		return;
	}

	/* Now check the 'by inode' chain */
	hash = SI_HASH(sp->s_shadow);
	for (tspp = &si_cachei[hash]; *tspp; tspp = &(*tspp)->s_forw) {
		if (*tspp == sp) {
			*tspp = sp->s_forw;
			break;
		}
	}

	/*
	 * At this point, we can unlock everything because this si
	 * is no longer in the cache, thus cannot be attached to.
	 */
	rw_exit(&sp->s_lock);
	rw_exit(&si_cache_lock);
	sp->s_flags &= ~SI_CACHED;
	(void) ufs_si_free_mem(sp);
}

/*
 * Alloc the hash buckets for the si cache & initialize
 * the unreferenced anchor and the cache lock.
 */
void
si_cache_init(void)
{
	rw_init(&si_cache_lock, NULL, RW_DEFAULT, NULL);

	/* The 'by acl' headers */
	si_cachea = kmem_zalloc(si_cachecnt * sizeof (si_t *), KM_SLEEP);
	/* The 'by inode' headers */
	si_cachei = kmem_zalloc(si_cachecnt * sizeof (si_t *), KM_SLEEP);
}

/*
 * aclcksum takes an acl and generates a checksum.  It takes as input
 * the acl to start at.
 *
 * s_aclp - pointer to starting acl
 *
 * returns checksum
 */
static int
aclcksum(ufs_ic_acl_t *s_aclp)
{
	ufs_ic_acl_t *aclp;
	int signature = 0;
	for (aclp = s_aclp; aclp; aclp = aclp->acl_ic_next) {
		signature += aclp->acl_ic_perm;
		signature += aclp->acl_ic_who;
	}
	return (signature);
}

/*
 * Generate a unique signature for an si structure.  Used by the
 * search routine si_cachea_get() to quickly identify candidates
 * prior to calling si_cmp().
 * Parameters:
 *	sp - Ptr to the si struct to generate the signature for.
 *
 * Returns: A signature for the si struct (really a checksum)
 */
static int
si_signature(si_t *sp)
{
	int signature = sp->s_dev;

	signature += aclcksum(sp->aowner) + aclcksum(sp->agroup) +
	    aclcksum(sp->aother) + aclcksum(sp->ausers) +
	    aclcksum(sp->agroups) + aclcksum(sp->downer) +
	    aclcksum(sp->dgroup) + aclcksum(sp->dother) +
	    aclcksum(sp->dusers) + aclcksum(sp->dgroups);
	if (sp->aclass.acl_ismask)
		signature += sp->aclass.acl_maskbits;
	if (sp->dclass.acl_ismask)
		signature += sp->dclass.acl_maskbits;

	return (signature);
}

/*
 * aclcmp compares two acls to see if they are identical.
 *
 * sp1 is source
 * sp2 is sourceb
 *
 * returns 0 if equal and 1 if not equal
 */
static int
aclcmp(ufs_ic_acl_t *aclin1p, ufs_ic_acl_t *aclin2p)
{
	ufs_ic_acl_t *aclp1;
	ufs_ic_acl_t *aclp2;

	/*
	 * if the starting pointers are equal then they are equal so
	 * just return.
	 */
	if (aclin1p == aclin2p)
		return (0);
	/*
	 * check element by element
	 */
	for (aclp1 = aclin1p, aclp2 = aclin2p; aclp1 && aclp2;
	    aclp1 = aclp1->acl_ic_next, aclp2 = aclp2->acl_ic_next) {
		if (aclp1->acl_ic_perm != aclp2->acl_ic_perm ||
		    aclp1->acl_ic_who != aclp2->acl_ic_who)
			return (1);
	}
	/*
	 * both must be zero (at the end of the acl)
	 */
	if (aclp1 || aclp2)
		return (1);

	return (0);
}

/*
 * Do extensive, field-by-field compare of two si structures.  Returns
 * 0 if they are exactly identical, 1 otherwise.
 *
 * Parameters:
 *	sp1 - Ptr to 1st si struct
 *	sp2 - Ptr to 2nd si struct
 *
 * Returns:
 *	0 - Identical
 *	1 - Not identical
 */
static int
si_cmp(si_t *sp1, si_t *sp2)
{
	if (sp1->s_dev != sp2->s_dev)
		return (1);
	if (aclcmp(sp1->aowner, sp2->aowner) ||
	    aclcmp(sp1->agroup, sp2->agroup) ||
	    aclcmp(sp1->aother, sp2->aother) ||
	    aclcmp(sp1->ausers, sp2->ausers) ||
	    aclcmp(sp1->agroups, sp2->agroups) ||
	    aclcmp(sp1->downer, sp2->downer) ||
	    aclcmp(sp1->dgroup, sp2->dgroup) ||
	    aclcmp(sp1->dother, sp2->dother) ||
	    aclcmp(sp1->dusers, sp2->dusers) ||
	    aclcmp(sp1->dgroups, sp2->dgroups))
		return (1);
	if (sp1->aclass.acl_ismask != sp2->aclass.acl_ismask)
		return (1);
	if (sp1->dclass.acl_ismask != sp2->dclass.acl_ismask)
		return (1);
	if (sp1->aclass.acl_ismask &&
	    sp1->aclass.acl_maskbits != sp2->aclass.acl_maskbits)
		return (1);
	if (sp1->dclass.acl_ismask &&
	    sp1->dclass.acl_maskbits != sp2->dclass.acl_maskbits)
		return (1);

	return (0);
}

/*
 * Remove all acls associated with a device.  All acls must have
 * a reference count of zero.
 *
 * inputs:
 *	device - device to remove from the cache
 *
 * outputs:
 *	none
 */
void
ufs_si_cache_flush(dev_t dev)
{
	si_t *tsp, **tspp;
	int i;

	rw_enter(&si_cache_lock, RW_WRITER);
	for (i = 0; i < si_cachecnt; i++) {
		tspp = &si_cachea[i];
		while (*tspp) {
			if ((*tspp)->s_dev == dev) {
				*tspp = (*tspp)->s_next;
			} else {
				tspp = &(*tspp)->s_next;
			}
		}
	}
	for (i = 0; i < si_cachecnt; i++) {
		tspp = &si_cachei[i];
		while (*tspp) {
			if ((*tspp)->s_dev == dev) {
				tsp = *tspp;
				*tspp = (*tspp)->s_forw;
				tsp->s_flags &= ~SI_CACHED;
				ufs_si_free_mem(tsp);
			} else {
				tspp = &(*tspp)->s_forw;
			}
		}
	}
	rw_exit(&si_cache_lock);
}

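/*
 * Note the two passes above: the first only unlinks matching entries
 * from the 'by acl' chains, and the second unlinks them from the
 * 'by inode' chains and frees them.  Since every cached si appears on
 * both chains, each structure is freed exactly once.
 */
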
/*
 * ufs_si_del is used to unhook a sp from an inode in memory
 *
 * ip is the inode to remove the sp from.
 */
void
ufs_si_del(struct inode *ip)
{
	si_t *sp = ip->i_ufs_acl;
	int refcnt;
	int signature;

	if (sp) {
		rw_enter(&sp->s_lock, RW_WRITER);
		refcnt = --sp->s_ref;
		signature = sp->s_signature;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		rw_exit(&sp->s_lock);
		if (refcnt == 0)
			si_cache_del(sp, signature);
		ip->i_ufs_acl = NULL;
	}
}