4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/types.h>
28 #include <sys/errno.h>
30 #include <sys/t_lock.h>
31 #include <sys/ksynch.h>
34 #include <sys/vnode.h>
36 #include <sys/systm.h>
40 #include <sys/fs/ufs_inode.h>
41 #include <sys/fs/ufs_acl.h>
42 #include <sys/fs/ufs_quota.h>
43 #include <sys/sysmacros.h>
44 #include <sys/debug.h>
45 #include <sys/policy.h>
48 static int si_signature(si_t
*);
49 static int si_cachei_get(struct inode
*, si_t
**);
50 static int si_cachea_get(struct inode
*, si_t
*, si_t
**);
51 static int si_cmp(si_t
*, si_t
*);
52 static void si_cache_put(si_t
*);
53 void si_cache_del(si_t
*, int);
54 void si_cache_init(void);
56 static void ufs_si_free_mem(si_t
*);
57 static int ufs_si_store(struct inode
*, si_t
*, int, cred_t
*);
58 static si_t
*ufs_acl_cp(si_t
*);
59 static int ufs_sectobuf(si_t
*, caddr_t
*, size_t *);
60 static int acl_count(ufs_ic_acl_t
*);
61 static int acl_validate(aclent_t
*, int, int);
62 static int vsecattr2aclentry(vsecattr_t
*, si_t
**);
63 static int aclentry2vsecattr(si_t
*, vsecattr_t
*);
65 krwlock_t si_cache_lock
; /* Protects si_cache */
66 int si_cachecnt
= 64; /* # buckets in si_cache[a|i] */
67 si_t
**si_cachea
; /* The 'by acl' cache chains */
68 si_t
**si_cachei
; /* The 'by inode' cache chains */
70 long si_cachemiss
= 0;
72 #define SI_HASH(S) ((int)(S) & (si_cachecnt - 1))
75 * Store the new acls in aclp. Attempts to make things atomic.
76 * Search the acl cache for an identical sp and, if found, attach
77 * the cache'd acl to ip. If the acl is new (not in the cache),
78 * add it to the cache, then attach it to ip. Last, remove and
79 * decrement the reference count of any prior acl list attached
83 * ip - Ptr to inode to receive the acl list
84 * sp - Ptr to in-core acl structure to attach to the inode.
85 * puship - 0 do not push the object inode(ip) 1 push the ip
86 * cr - Ptr to credentials
88 * Returns: 0 - Success
92 ufs_si_store(struct inode
*ip
, si_t
*sp
, int puship
, cred_t
*cr
)
108 struct ufsvfs
*ufsvfsp
= ip
->i_ufsvfs
;
109 struct fs
*fs
= ufsvfsp
->vfs_fs
;
111 ASSERT(RW_WRITE_HELD(&ip
->i_contents
));
112 ASSERT(ip
->i_ufs_acl
!= sp
);
114 if (!CHECK_ACL_ALLOWED(ip
->i_mode
& IFMT
))
118 * if there are only the three owner/group/other then do not
119 * create a shadow inode. If there is already a shadow with
120 * the file, remove it.
128 sp
->dclass
.acl_ismask
== 0 &&
132 err
= ufs_si_free(ip
->i_ufs_acl
, ITOV(ip
)->v_vfsp
, cr
);
133 ip
->i_ufs_acl
= NULL
;
135 ip
->i_flag
|= IMOD
| IACC
;
136 ip
->i_mode
= (ip
->i_smode
& ~0777) |
137 ((sp
->aowner
->acl_ic_perm
& 07) << 6) |
139 (sp
->aother
->acl_ic_perm
& 07);
140 TRANS_INODE(ip
->i_ufsvfs
, ip
);
149 * Check cache. If in cache, use existing shadow inode.
150 * Increment the shadow link count, then attach to the
151 * cached ufs_acl_entry struct, and increment it's reference
152 * count. Then discard the passed-in ufs_acl_entry and
155 if (si_cachea_get(ip
, sp
, &csp
) == 0) {
156 ASSERT(RW_WRITE_HELD(&csp
->s_lock
));
157 if (ip
->i_ufs_acl
== csp
) {
158 rw_exit(&csp
->s_lock
);
159 (void) ufs_si_free_mem(sp
);
162 vfsp
= ITOV(ip
)->v_vfsp
;
163 ASSERT(csp
->s_shadow
<= INT_MAX
);
164 shadow
= (int)csp
->s_shadow
;
166 * We can't call ufs_iget while holding the csp locked,
167 * because we might deadlock. So we drop the
168 * lock on csp, then go search the si_cache again
169 * to see if the csp is still there.
171 rw_exit(&csp
->s_lock
);
172 if ((err
= ufs_iget(vfsp
, shadow
, &sip
, cr
)) != 0) {
173 (void) ufs_si_free_mem(sp
);
176 rw_enter(&sip
->i_contents
, RW_WRITER
);
177 if ((sip
->i_mode
& IFMT
) != IFSHAD
|| sip
->i_nlink
<= 0) {
178 rw_exit(&sip
->i_contents
);
182 /* Get the csp again */
183 if (si_cachea_get(ip
, sp
, &csp
) != 0) {
184 rw_exit(&sip
->i_contents
);
188 ASSERT(RW_WRITE_HELD(&csp
->s_lock
));
189 /* See if we got the right shadow */
190 if (csp
->s_shadow
!= shadow
) {
191 rw_exit(&csp
->s_lock
);
192 rw_exit(&sip
->i_contents
);
196 ASSERT(RW_WRITE_HELD(&sip
->i_contents
));
197 ASSERT(sip
->i_dquot
== 0);
198 /* Increment link count */
199 ASSERT(sip
->i_nlink
> 0);
201 TRANS_INODE(ufsvfsp
, sip
);
202 csp
->s_use
= sip
->i_nlink
;
204 ASSERT(sp
->s_ref
>= 0 && sp
->s_ref
<= sp
->s_use
);
205 sip
->i_flag
|= ICHG
| IMOD
;
209 * Always release s_lock before both releasing i_contents
210 * and calling VN_RELE.
212 rw_exit(&csp
->s_lock
);
213 rw_exit(&sip
->i_contents
);
215 (void) ufs_si_free_mem(sp
);
221 /* Alloc a shadow inode and fill it in */
222 err
= ufs_ialloc(ip
, ip
->i_number
, (mode_t
)IFSHAD
, &sip
, cr
);
224 (void) ufs_si_free_mem(sp
);
227 rw_enter(&sip
->i_contents
, RW_WRITER
);
228 sip
->i_flag
|= IACC
| IUPD
| ICHG
;
230 sip
->i_mode
= (o_mode_t
)IFSHAD
;
231 ITOV(sip
)->v_type
= VREG
;
232 ufs_reset_vnode(ITOV(sip
));
234 sip
->i_uid
= crgetuid(cr
);
235 sip
->i_suid
= (ulong_t
)sip
->i_uid
> (ulong_t
)USHRT_MAX
?
236 UID_LONG
: sip
->i_uid
;
237 sip
->i_gid
= crgetgid(cr
);
238 sip
->i_sgid
= (ulong_t
)sip
->i_gid
> (ulong_t
)USHRT_MAX
?
239 GID_LONG
: sip
->i_gid
;
241 TRANS_INODE(ufsvfsp
, sip
);
242 sip
->i_ufs_acl
= NULL
;
243 ASSERT(sip
->i_size
== 0);
245 sp
->s_shadow
= sip
->i_number
;
247 if ((err
= ufs_sectobuf(sp
, &acldata
, &acldatalen
)) != 0)
252 * We don't actually care about the residual count upon failure,
253 * but giving ufs_rdwri() the pointer means it won't translate
254 * all failures to EIO. Our caller needs to know when ENOSPC
258 if (((err
= ufs_rdwri(UIO_WRITE
, FWRITE
|FSYNC
, sip
, acldata
,
259 acldatalen
, (offset_t
)0, UIO_SYSSPACE
, &resid
, cr
)) != 0) ||
261 kmem_free(acldata
, acldatalen
);
262 if ((resid
!= 0) && (err
== 0))
267 offset
+= acldatalen
;
268 if ((acldatalen
+ fs
->fs_bsize
) > ufsvfsp
->vfs_maxacl
)
269 ufsvfsp
->vfs_maxacl
= acldatalen
+ fs
->fs_bsize
;
271 kmem_free(acldata
, acldatalen
);
272 /* Sync & free the shadow inode */
274 rw_exit(&sip
->i_contents
);
277 /* We're committed to using this sp */
281 /* Now put the new acl stuff in the cache */
282 /* XXX Might make a duplicate */
287 /* Now switch the parent inode to use the new shadow inode */
288 ASSERT(RW_WRITE_HELD(&ip
->i_contents
));
289 rw_enter(&sp
->s_lock
, RW_READER
);
290 oldsp
= ip
->i_ufs_acl
;
291 oldshadow
= ip
->i_shadow
;
293 ASSERT(sp
->s_shadow
<= INT_MAX
);
294 ip
->i_shadow
= (int32_t)sp
->s_shadow
;
296 ASSERT(oldshadow
!= ip
->i_number
);
297 ASSERT(ip
->i_number
!= ip
->i_shadow
);
299 * Change the mode bits to follow the acl list
301 * NOTE: a directory is not required to have a "regular" acl
302 * bug id's 1238908, 1257173, 1263171 and 1263188
304 * but if a "regular" acl is present, it must contain
305 * an "owner", "group", and "other" acl
307 * If an ACL mask exists, the effective group rights are
308 * set to the mask. Otherwise, the effective group rights
309 * are set to the object group bits.
311 if (sp
->aowner
) { /* Owner */
312 ip
->i_mode
&= ~0700; /* clear Owner */
313 ip
->i_mode
|= (sp
->aowner
->acl_ic_perm
& 07) << 6;
314 ip
->i_uid
= sp
->aowner
->acl_ic_who
;
317 if (sp
->agroup
) { /* Group */
318 ip
->i_mode
&= ~0070; /* clear Group */
319 ip
->i_mode
|= MASK2MODE(sp
); /* apply mask */
320 ip
->i_gid
= sp
->agroup
->acl_ic_who
;
323 if (sp
->aother
) { /* Other */
324 ip
->i_mode
&= ~0007; /* clear Other */
325 ip
->i_mode
|= (sp
->aother
->acl_ic_perm
& 07);
328 if (sp
->aclass
.acl_ismask
)
329 ip
->i_mode
= (ip
->i_mode
& ~070) |
330 (((sp
->aclass
.acl_maskbits
& 07) << 3) &
333 TRANS_INODE(ufsvfsp
, ip
);
334 rw_exit(&sp
->s_lock
);
338 * when creating a file there is no need to push the inode, it
345 * Decrement link count on the old shadow inode,
346 * and decrement reference count on the old aclp,
349 /* Get the shadow inode */
350 ASSERT(RW_WRITE_HELD(&ip
->i_contents
));
351 vfsp
= ITOV(ip
)->v_vfsp
;
352 if ((err
= ufs_iget_alloced(vfsp
, oldshadow
, &sip
, cr
)) != 0) {
355 /* Decrement link count */
356 rw_enter(&sip
->i_contents
, RW_WRITER
);
358 rw_enter(&oldsp
->s_lock
, RW_WRITER
);
359 ASSERT(sip
->i_dquot
== 0);
360 ASSERT(sip
->i_nlink
> 0);
361 usecnt
= --sip
->i_nlink
;
363 TRANS_INODE(ufsvfsp
, sip
);
364 sip
->i_flag
|= ICHG
| IMOD
;
368 oldsp
->s_use
= usecnt
;
369 refcnt
= --oldsp
->s_ref
;
370 signature
= oldsp
->s_signature
;
372 * Always release s_lock before both releasing
373 * i_contents and calling VN_RELE.
375 rw_exit(&oldsp
->s_lock
);
377 rw_exit(&sip
->i_contents
);
379 if (oldsp
&& (refcnt
== 0))
380 si_cache_del(oldsp
, signature
);
385 /* Throw the newly alloc'd inode away */
388 TRANS_INODE(ufsvfsp
, sip
);
390 rw_exit(&sip
->i_contents
);
392 ASSERT(!sp
->s_use
&& !sp
->s_ref
&& !(sp
->s_flags
& SI_CACHED
));
393 (void) ufs_si_free_mem(sp
);
398 * Load the acls for inode ip either from disk (adding to the cache),
399 * or search the cache and attach the cache'd acl list to the ip.
400 * In either case, maintain the proper reference count on the cached entry.
403 * ip - Ptr to the inode which needs the acl list loaded
404 * cr - Ptr to credentials
406 * Returns: 0 - Success
410 ufs_si_load(struct inode
*ip
, cred_t
*cr
)
416 vsecattr_t vsecattr
= { 0, 0, NULL
, 0, NULL
};
419 caddr_t acldata
= NULL
;
426 struct ufsvfs
*ufsvfsp
= ip
->i_ufsvfs
;
427 struct fs
*fs
= ufsvfsp
->vfs_fs
;
430 ASSERT(RW_WRITE_HELD(&ip
->i_contents
));
431 ASSERT(ip
->i_shadow
&& ip
->i_ufs_acl
== NULL
);
432 ASSERT((ip
->i_mode
& IFMT
) != IFSHAD
);
434 if (!CHECK_ACL_ALLOWED(ip
->i_mode
& IFMT
))
437 if (ip
->i_shadow
== ip
->i_number
)
440 maxino
= (ino_t
)(ITOF(ip
)->fs_ncg
* ITOF(ip
)->fs_ipg
);
441 if (ip
->i_shadow
< UFSROOTINO
|| ip
->i_shadow
> maxino
)
445 * XXX Check cache. If in cache, link to it and increment
446 * the reference count, then return.
448 if (si_cachei_get(ip
, &sp
) == 0) {
449 ASSERT(RW_WRITE_HELD(&sp
->s_lock
));
452 ASSERT(sp
->s_ref
>= 0 && sp
->s_ref
<= sp
->s_use
);
453 rw_exit(&sp
->s_lock
);
458 /* Get the shadow inode */
459 vfsp
= ITOV(ip
)->v_vfsp
;
460 shadow
= ip
->i_shadow
;
461 if ((err
= ufs_iget_alloced(vfsp
, shadow
, &sip
, cr
)) != 0) {
464 rw_enter(&sip
->i_contents
, RW_WRITER
);
466 if ((sip
->i_mode
& IFMT
) != IFSHAD
) {
467 rw_exit(&sip
->i_contents
);
472 ASSERT(sip
->i_dquot
== 0);
473 usecnt
= sip
->i_nlink
;
474 if ((!ULOCKFS_IS_NOIACC(&ufsvfsp
->vfs_ulockfs
)) &&
475 (!(sip
)->i_ufsvfs
->vfs_noatime
)) {
478 rw_downgrade(&sip
->i_contents
);
480 ASSERT(sip
->i_size
<= MAXOFF_T
);
481 /* Read the acl's and other stuff from disk */
482 acldata
= kmem_zalloc((size_t)sip
->i_size
, KM_SLEEP
);
483 acldatalen
= sip
->i_size
;
485 err
= ufs_rdwri(UIO_READ
, FREAD
, sip
, acldata
, acldatalen
, (offset_t
)0,
486 UIO_SYSSPACE
, (int *)0, cr
);
488 rw_exit(&sip
->i_contents
);
494 * Convert from disk format
495 * Result is a vsecattr struct which we then convert to the
498 bzero((caddr_t
)&vsecattr
, sizeof (vsecattr_t
));
499 for (fsdp
= (ufs_fsd_t
*)acldata
;
500 fsdp
< (ufs_fsd_t
*)(acldata
+ acldatalen
);
501 fsdp
= (ufs_fsd_t
*)((caddr_t
)fsdp
+
502 FSD_RECSZ(fsdp
, fsdp
->fsd_size
))) {
503 if (fsdp
->fsd_size
<= 0)
505 switch (fsdp
->fsd_type
) {
507 numacls
= vsecattr
.vsa_aclcnt
=
508 (int)((fsdp
->fsd_size
- 2 * sizeof (int)) /
510 aclp
= vsecattr
.vsa_aclentp
=
511 kmem_zalloc(numacls
* sizeof (aclent_t
), KM_SLEEP
);
512 for (ufsaclp
= (ufs_acl_t
*)fsdp
->fsd_data
;
513 numacls
; ufsaclp
++) {
514 aclp
->a_type
= ufsaclp
->acl_tag
;
515 aclp
->a_id
= ufsaclp
->acl_who
;
516 aclp
->a_perm
= ufsaclp
->acl_perm
;
522 numacls
= vsecattr
.vsa_dfaclcnt
=
523 (int)((fsdp
->fsd_size
- 2 * sizeof (int)) /
525 aclp
= vsecattr
.vsa_dfaclentp
=
526 kmem_zalloc(numacls
* sizeof (aclent_t
), KM_SLEEP
);
527 for (ufsaclp
= (ufs_acl_t
*)fsdp
->fsd_data
;
528 numacls
; ufsaclp
++) {
529 aclp
->a_type
= ufsaclp
->acl_tag
;
530 aclp
->a_id
= ufsaclp
->acl_who
;
531 aclp
->a_perm
= ufsaclp
->acl_perm
;
539 if (vsecattr
.vsa_aclentp
) {
540 ksort((caddr_t
)vsecattr
.vsa_aclentp
, vsecattr
.vsa_aclcnt
,
541 sizeof (aclent_t
), cmp2acls
);
542 if ((err
= acl_validate(vsecattr
.vsa_aclentp
,
543 vsecattr
.vsa_aclcnt
, ACL_CHECK
)) != 0) {
547 if (vsecattr
.vsa_dfaclentp
) {
548 ksort((caddr_t
)vsecattr
.vsa_dfaclentp
, vsecattr
.vsa_dfaclcnt
,
549 sizeof (aclent_t
), cmp2acls
);
550 if ((err
= acl_validate(vsecattr
.vsa_dfaclentp
,
551 vsecattr
.vsa_dfaclcnt
, DEF_ACL_CHECK
)) != 0) {
556 /* ignore shadow inodes without ACLs */
557 if (!vsecattr
.vsa_aclentp
&& !vsecattr
.vsa_dfaclentp
) {
562 /* Convert from vsecattr struct to ufs_acl_entry struct */
563 if ((err
= vsecattr2aclentry(&vsecattr
, &sp
)) != 0) {
567 /* There aren't filled in by vsecattr2aclentry */
568 sp
->s_shadow
= ip
->i_shadow
;
569 sp
->s_dev
= ip
->i_dev
;
572 ASSERT(sp
->s_ref
>= 0 && sp
->s_ref
<= sp
->s_use
);
574 /* XXX Might make a duplicate */
577 /* Signal anyone waiting on this shadow to be loaded */
581 if ((acldatalen
+ fs
->fs_bsize
) > ufsvfsp
->vfs_maxacl
)
582 ufsvfsp
->vfs_maxacl
= acldatalen
+ fs
->fs_bsize
;
585 * Common exit point. Mark shadow inode as ISTALE
586 * if we detect an internal inconsistency, to
587 * prevent stray inodes appearing in the cache.
590 rw_enter(&sip
->i_contents
, RW_READER
);
591 mutex_enter(&sip
->i_tlock
);
592 sip
->i_flag
|= ISTALE
;
593 mutex_exit(&sip
->i_tlock
);
594 rw_exit(&sip
->i_contents
);
599 * Cleanup of data structures allocated
603 kmem_free(acldata
, acldatalen
);
605 if (vsecattr
.vsa_aclentp
)
606 kmem_free(vsecattr
.vsa_aclentp
,
607 vsecattr
.vsa_aclcnt
* sizeof (aclent_t
));
608 if (vsecattr
.vsa_dfaclentp
)
609 kmem_free(vsecattr
.vsa_dfaclentp
,
610 vsecattr
.vsa_dfaclcnt
* sizeof (aclent_t
));
615 * Check the inode's ACL's to see if this mode of access is
616 * allowed; return 0 if allowed, EACCES if not.
618 * We follow the procedure defined in Sec. 3.3.5, ACL Access
619 * Check Algorithm, of the POSIX 1003.6 Draft Standard.
622 * mode mode of access read, write, execute/examine
626 ufs_acl_access(struct inode
*ip
, int mode
, cred_t
*cr
)
629 int ismask
, mask
= 0;
633 uid_t uid
= crgetuid(cr
);
636 ASSERT(ip
->i_ufs_acl
!= NULL
);
637 ASSERT(RW_LOCK_HELD(&ip
->i_contents
));
641 ismask
= sp
->aclass
.acl_ismask
?
642 sp
->aclass
.acl_ismask
: 0;
645 mask
= sp
->aclass
.acl_maskbits
;
650 * (1) If user owns the file, obey user mode bits
652 owner
= sp
->aowner
->acl_ic_who
;
654 return (MODE_CHECK(owner
, mode
, (sp
->aowner
->acl_ic_perm
<< 6),
659 * (2) Obey any matching ACL_USER entry
662 for (acl
= sp
->ausers
; acl
!= NULL
; acl
= acl
->acl_ic_next
) {
663 if (acl
->acl_ic_who
== uid
) {
664 return (MODE_CHECK(owner
, mode
,
665 (mask
& acl
->acl_ic_perm
) << 6, cr
, ip
));
670 * (3) If user belongs to file's group, obey group mode bits
671 * if no ACL mask is defined; if there is an ACL mask, we look
672 * at both the group mode bits and any ACL_GROUP entries.
674 if (groupmember((uid_t
)sp
->agroup
->acl_ic_who
, cr
)) {
676 gperm
= (sp
->agroup
->acl_ic_perm
);
678 return (MODE_CHECK(owner
, mode
, (gperm
<< 6), cr
, ip
));
682 * (4) Accumulate the permissions in matching ACL_GROUP entries
685 for (acl
= sp
->agroups
; acl
!= NULL
; acl
= acl
->acl_ic_next
) {
686 if (groupmember(acl
->acl_ic_who
, cr
)) {
688 gperm
|= acl
->acl_ic_perm
;
694 return (MODE_CHECK(owner
, mode
, ((gperm
& mask
) << 6), cr
, ip
));
697 * (5) Finally, use the "other" mode bits
699 return (MODE_CHECK(owner
, mode
, sp
->aother
->acl_ic_perm
<< 6, cr
, ip
));
704 ufs_acl_get(struct inode
*ip
, vsecattr_t
*vsap
, int flag
, cred_t
*cr
)
708 ASSERT(RW_LOCK_HELD(&ip
->i_contents
));
710 /* XXX Range check, sanity check, shadow check */
711 /* If an ACL is present, get the data from the shadow inode info */
713 return (aclentry2vsecattr(ip
->i_ufs_acl
, vsap
));
716 * If no ACLs are present, fabricate one from the mode bits.
717 * This code is almost identical to fs_fab_acl(), but we
718 * already have the mode bits handy, so we'll avoid going
719 * through fop_getattr() again.
722 vsap
->vsa_aclcnt
= 0;
723 vsap
->vsa_aclentp
= NULL
;
724 vsap
->vsa_dfaclcnt
= 0; /* Default ACLs are not fabricated */
725 vsap
->vsa_dfaclentp
= NULL
;
727 if (vsap
->vsa_mask
& (VSA_ACLCNT
| VSA_ACL
))
728 vsap
->vsa_aclcnt
= 4; /* USER, GROUP, OTHER, and CLASS */
730 if (vsap
->vsa_mask
& VSA_ACL
) {
731 vsap
->vsa_aclentp
= kmem_zalloc(4 * sizeof (aclent_t
),
733 if (vsap
->vsa_aclentp
== NULL
)
735 aclentp
= vsap
->vsa_aclentp
;
738 aclentp
->a_type
= USER_OBJ
;
739 aclentp
->a_perm
= ((ushort_t
)(ip
->i_mode
& 0700)) >> 6;
740 aclentp
->a_id
= ip
->i_uid
; /* Really undefined */
744 aclentp
->a_type
= GROUP_OBJ
;
745 aclentp
->a_perm
= ((ushort_t
)(ip
->i_mode
& 0070)) >> 3;
746 aclentp
->a_id
= ip
->i_gid
; /* Really undefined */
750 aclentp
->a_type
= OTHER_OBJ
;
751 aclentp
->a_perm
= ip
->i_mode
& 0007;
752 aclentp
->a_id
= 0; /* Really undefined */
756 aclentp
->a_type
= CLASS_OBJ
;
757 aclentp
->a_perm
= ((ushort_t
)(ip
->i_mode
& 0070)) >> 3;
758 aclentp
->a_id
= 0; /* Really undefined */
759 ksort((caddr_t
)vsap
->vsa_aclentp
, vsap
->vsa_aclcnt
,
760 sizeof (aclent_t
), cmp2acls
);
768 ufs_acl_set(struct inode
*ip
, vsecattr_t
*vsap
, int flag
, cred_t
*cr
)
773 ASSERT(RW_WRITE_HELD(&ip
->i_contents
));
775 if (!CHECK_ACL_ALLOWED(ip
->i_mode
& IFMT
))
779 * only the owner of the file or privileged users can change the ACLs
781 if (secpolicy_vnode_setdac(cr
, ip
->i_uid
) != 0)
784 /* Convert from vsecattr struct to ufs_acl_entry struct */
785 if ((err
= vsecattr2aclentry(vsap
, &sp
)) != 0)
787 sp
->s_dev
= ip
->i_dev
;
790 * Make the user & group objs in the acl list follow what's
794 if (vsap
->vsa_mask
== VSA_ACL
) {
802 sp
->aowner
->acl_ic_who
= ip
->i_uid
;
804 sp
->agroup
->acl_ic_who
= ip
->i_gid
;
807 * Write and cache the new acl list
809 err
= ufs_si_store(ip
, sp
, 1, cr
);
815 * XXX Scan sorted array of acl's, checking for:
816 * 1) Any duplicate/conflicting entries (same type and id)
817 * 2) More than 1 of USER_OBJ, GROUP_OBJ, OTHER_OBJ, CLASS_OBJ
818 * 3) More than 1 of DEF_USER_OBJ, DEF_GROUP_OBJ, DEF_OTHER_OBJ, DEF_CLASS_OBJ
821 * aclentp - ptr to sorted list of acl entries.
822 * nentries - # acl entries on the list
823 * flag - Bitmap (ACL_CHECK and/or DEF_ACL_CHECK) indicating whether the
824 * list contains regular acls, default acls, or both.
826 * Returns: 0 - Success
827 * EINVAL - Invalid list (dups or multiple entries of type USER_OBJ, etc)
830 acl_validate(aclent_t
*aclentp
, int nentries
, int flag
)
837 int ndef_user_objs
= 0;
838 int ndef_group_objs
= 0;
839 int ndef_other_objs
= 0;
840 int ndef_class_objs
= 0;
847 /* Null list or list of one */
854 for (i
= 1; i
< nentries
; i
++) {
855 if (((aclentp
[i
- 1].a_type
== aclentp
[i
].a_type
) &&
856 (aclentp
[i
- 1].a_id
== aclentp
[i
].a_id
)) ||
857 (aclentp
[i
- 1].a_perm
> 07)) {
862 if (flag
== 0 || (flag
!= ACL_CHECK
&& flag
!= DEF_ACL_CHECK
))
866 for (i
= 0; i
< nentries
; i
++) {
867 switch (aclentp
[i
].a_type
) {
868 case USER_OBJ
: /* Owner */
871 case GROUP_OBJ
: /* Group */
874 case OTHER_OBJ
: /* Other */
877 case CLASS_OBJ
: /* Mask */
880 case DEF_USER_OBJ
: /* Default Owner */
883 case DEF_GROUP_OBJ
: /* Default Group */
886 case DEF_OTHER_OBJ
: /* Default Other */
889 case DEF_CLASS_OBJ
: /* Default Mask */
892 case USER
: /* Users */
895 case GROUP
: /* Groups */
898 case DEF_USER
: /* Default Users */
901 case DEF_GROUP
: /* Default Groups */
904 default: /* Unknown type */
910 * For normal acl's, we require there be one (and only one)
911 * USER_OBJ, GROUP_OBJ and OTHER_OBJ. There is either zero
914 if (flag
& ACL_CHECK
) {
915 if (nuser_objs
!= 1 || ngroup_objs
!= 1 ||
916 nother_objs
!= 1 || nclass_objs
> 1) {
920 * If there are ANY group acls, there MUST be a
921 * class_obj(mask) acl (1003.6/D12 p. 29 lines 75-80).
923 if (ngroups
&& !nclass_objs
) {
926 if (nuser_objs
+ ngroup_objs
+ nother_objs
+ nclass_objs
+
927 ngroups
+ nusers
> MAX_ACL_ENTRIES
)
932 * For default acl's, we require that there be either one (and only one)
933 * DEF_USER_OBJ, DEF_GROUP_OBJ and DEF_OTHER_OBJ
934 * or there be none of them.
936 if (flag
& DEF_ACL_CHECK
) {
937 if (ndef_other_objs
> 1 || ndef_user_objs
> 1 ||
938 ndef_group_objs
> 1 || ndef_class_objs
> 1) {
942 numdefs
= ndef_other_objs
+ ndef_user_objs
+ ndef_group_objs
;
944 if (numdefs
!= 0 && numdefs
!= 3) {
948 * If there are ANY def_group acls, there MUST be a
949 * def_class_obj(mask) acl (1003.6/D12 P. 29 lines 75-80).
950 * XXX(jimh) This is inferred.
952 if (ndef_groups
&& !ndef_class_objs
) {
955 if ((ndef_users
|| ndef_groups
) &&
956 ((numdefs
!= 3) && !ndef_class_objs
)) {
959 if (ndef_user_objs
+ ndef_group_objs
+ ndef_other_objs
+
960 ndef_class_objs
+ ndef_users
+ ndef_groups
>
968 formacl(ufs_ic_acl_t
**aclpp
, aclent_t
*aclentp
)
972 uaclp
= kmem_alloc(sizeof (ufs_ic_acl_t
), KM_SLEEP
);
973 uaclp
->acl_ic_perm
= aclentp
->a_perm
;
974 uaclp
->acl_ic_who
= aclentp
->a_id
;
975 uaclp
->acl_ic_next
= *aclpp
;
981 * XXX - Make more efficient
982 * Convert from the vsecattr struct, used by the VOP interface, to
983 * the ufs_acl_entry struct used for in-core storage of acl's.
986 * vsap - Ptr to array of security attributes.
987 * spp - Ptr to ptr to si struct for the results
989 * Returns: 0 - Success
993 vsecattr2aclentry(vsecattr_t
*vsap
, si_t
**spp
)
995 aclent_t
*aclentp
, *aclp
;
1000 /* Sort & validate the lists on the vsap */
1001 ksort((caddr_t
)vsap
->vsa_aclentp
, vsap
->vsa_aclcnt
,
1002 sizeof (aclent_t
), cmp2acls
);
1003 ksort((caddr_t
)vsap
->vsa_dfaclentp
, vsap
->vsa_dfaclcnt
,
1004 sizeof (aclent_t
), cmp2acls
);
1005 if ((err
= acl_validate(vsap
->vsa_aclentp
,
1006 vsap
->vsa_aclcnt
, ACL_CHECK
)) != 0)
1008 if ((err
= acl_validate(vsap
->vsa_dfaclentp
,
1009 vsap
->vsa_dfaclcnt
, DEF_ACL_CHECK
)) != 0)
1012 /* Create new si struct and hang acl's off it */
1013 sp
= kmem_zalloc(sizeof (si_t
), KM_SLEEP
);
1014 rw_init(&sp
->s_lock
, NULL
, RW_DEFAULT
, NULL
);
1016 /* Process acl list */
1017 aclp
= (aclent_t
*)vsap
->vsa_aclentp
;
1018 aclentp
= aclp
+ vsap
->vsa_aclcnt
- 1;
1019 for (i
= 0; i
< vsap
->vsa_aclcnt
; i
++) {
1020 switch (aclentp
->a_type
) {
1021 case USER_OBJ
: /* Owner */
1022 if (err
= formacl(&sp
->aowner
, aclentp
))
1025 case GROUP_OBJ
: /* Group */
1026 if (err
= formacl(&sp
->agroup
, aclentp
))
1029 case OTHER_OBJ
: /* Other */
1030 if (err
= formacl(&sp
->aother
, aclentp
))
1034 if (err
= formacl(&sp
->ausers
, aclentp
))
1037 case CLASS_OBJ
: /* Mask */
1038 sp
->aclass
.acl_ismask
= 1;
1039 sp
->aclass
.acl_maskbits
= aclentp
->a_perm
;
1042 if (err
= formacl(&sp
->agroups
, aclentp
))
1051 /* Process default acl list */
1052 aclp
= (aclent_t
*)vsap
->vsa_dfaclentp
;
1053 aclentp
= aclp
+ vsap
->vsa_dfaclcnt
- 1;
1054 for (i
= 0; i
< vsap
->vsa_dfaclcnt
; i
++) {
1055 switch (aclentp
->a_type
) {
1056 case DEF_USER_OBJ
: /* Default Owner */
1057 if (err
= formacl(&sp
->downer
, aclentp
))
1060 case DEF_GROUP_OBJ
: /* Default Group */
1061 if (err
= formacl(&sp
->dgroup
, aclentp
))
1064 case DEF_OTHER_OBJ
: /* Default Other */
1065 if (err
= formacl(&sp
->dother
, aclentp
))
1069 if (err
= formacl(&sp
->dusers
, aclentp
))
1072 case DEF_CLASS_OBJ
: /* Default Mask */
1073 sp
->dclass
.acl_ismask
= 1;
1074 sp
->dclass
.acl_maskbits
= aclentp
->a_perm
;
1077 if (err
= formacl(&sp
->dgroups
, aclentp
))
1089 ufs_si_free_mem(sp
);
1094 formvsec(int obj_type
, ufs_ic_acl_t
*aclp
, aclent_t
**aclentpp
)
1096 for (; aclp
; aclp
= aclp
->acl_ic_next
) {
1097 (*aclentpp
)->a_type
= obj_type
;
1098 (*aclentpp
)->a_perm
= aclp
->acl_ic_perm
;
1099 (*aclentpp
)->a_id
= aclp
->acl_ic_who
;
1105 * XXX - Make more efficient
1106 * Convert from the ufs_acl_entry struct used for in-core storage of acl's
1107 * to the vsecattr struct, used by the VOP interface.
1110 * sp - Ptr to si struct with the acls
1111 * vsap - Ptr to a vsecattr struct which will take the results.
1113 * Returns: 0 - Success
1114 * N - From errno table
1117 aclentry2vsecattr(si_t
*sp
, vsecattr_t
*vsap
)
1123 vsap
->vsa_aclentp
= vsap
->vsa_dfaclentp
= NULL
;
1125 numacls
= acl_count(sp
->aowner
) +
1126 acl_count(sp
->agroup
) +
1127 acl_count(sp
->aother
) +
1128 acl_count(sp
->ausers
) +
1129 acl_count(sp
->agroups
);
1130 if (sp
->aclass
.acl_ismask
)
1133 if (vsap
->vsa_mask
& (VSA_ACLCNT
| VSA_ACL
))
1134 vsap
->vsa_aclcnt
= numacls
;
1139 if (vsap
->vsa_mask
& VSA_ACL
) {
1140 vsap
->vsa_aclentp
= kmem_zalloc(numacls
* sizeof (aclent_t
),
1142 aclentp
= vsap
->vsa_aclentp
;
1144 formvsec(USER_OBJ
, sp
->aowner
, &aclentp
);
1145 formvsec(USER
, sp
->ausers
, &aclentp
);
1146 formvsec(GROUP_OBJ
, sp
->agroup
, &aclentp
);
1147 formvsec(GROUP
, sp
->agroups
, &aclentp
);
1148 formvsec(OTHER_OBJ
, sp
->aother
, &aclentp
);
1150 if (sp
->aclass
.acl_ismask
) {
1151 aclentp
->a_type
= CLASS_OBJ
; /* Mask */
1152 aclentp
->a_perm
= sp
->aclass
.acl_maskbits
;
1157 /* Sort the acl list */
1158 ksort((caddr_t
)vsap
->vsa_aclentp
, vsap
->vsa_aclcnt
,
1159 sizeof (aclent_t
), cmp2acls
);
1160 /* Check the acl list */
1161 if ((err
= acl_validate(vsap
->vsa_aclentp
,
1162 vsap
->vsa_aclcnt
, ACL_CHECK
)) != 0) {
1163 kmem_free(vsap
->vsa_aclentp
,
1164 numacls
* sizeof (aclent_t
));
1165 vsap
->vsa_aclentp
= NULL
;
1171 /* Process Defaults */
1173 numacls
= acl_count(sp
->downer
) +
1174 acl_count(sp
->dgroup
) +
1175 acl_count(sp
->dother
) +
1176 acl_count(sp
->dusers
) +
1177 acl_count(sp
->dgroups
);
1178 if (sp
->dclass
.acl_ismask
)
1181 if (vsap
->vsa_mask
& (VSA_DFACLCNT
| VSA_DFACL
))
1182 vsap
->vsa_dfaclcnt
= numacls
;
1187 if (vsap
->vsa_mask
& VSA_DFACL
) {
1188 vsap
->vsa_dfaclentp
=
1189 kmem_zalloc(numacls
* sizeof (aclent_t
), KM_SLEEP
);
1190 aclentp
= vsap
->vsa_dfaclentp
;
1191 formvsec(DEF_USER_OBJ
, sp
->downer
, &aclentp
);
1192 formvsec(DEF_USER
, sp
->dusers
, &aclentp
);
1193 formvsec(DEF_GROUP_OBJ
, sp
->dgroup
, &aclentp
);
1194 formvsec(DEF_GROUP
, sp
->dgroups
, &aclentp
);
1195 formvsec(DEF_OTHER_OBJ
, sp
->dother
, &aclentp
);
1197 if (sp
->dclass
.acl_ismask
) {
1198 aclentp
->a_type
= DEF_CLASS_OBJ
; /* Mask */
1199 aclentp
->a_perm
= sp
->dclass
.acl_maskbits
;
1204 /* Sort the default acl list */
1205 ksort((caddr_t
)vsap
->vsa_dfaclentp
, vsap
->vsa_dfaclcnt
,
1206 sizeof (aclent_t
), cmp2acls
);
1207 if ((err
= acl_validate(vsap
->vsa_dfaclentp
,
1208 vsap
->vsa_dfaclcnt
, DEF_ACL_CHECK
)) != 0) {
1209 if (vsap
->vsa_aclentp
!= NULL
)
1210 kmem_free(vsap
->vsa_aclentp
,
1211 vsap
->vsa_aclcnt
* sizeof (aclent_t
));
1212 kmem_free(vsap
->vsa_dfaclentp
,
1213 vsap
->vsa_dfaclcnt
* sizeof (aclent_t
));
1214 vsap
->vsa_aclentp
= vsap
->vsa_dfaclentp
= NULL
;
1224 acl_free(ufs_ic_acl_t
*aclp
)
1226 while (aclp
!= NULL
) {
1227 ufs_ic_acl_t
*nextaclp
= aclp
->acl_ic_next
;
1228 kmem_free(aclp
, sizeof (ufs_ic_acl_t
));
1234 * ufs_si_free_mem will discard the sp, and the acl hanging off of the
1235 * sp. It is required that the sp not be locked, and not be in the
1238 * input: pointer to sp to discard.
1244 ufs_si_free_mem(si_t
*sp
)
1246 ASSERT(!(sp
->s_flags
& SI_CACHED
));
1247 ASSERT(!RW_LOCK_HELD(&sp
->s_lock
));
1249 * remove from the cache
1250 * free the acl entries
1252 acl_free(sp
->aowner
);
1253 acl_free(sp
->agroup
);
1254 acl_free(sp
->aother
);
1255 acl_free(sp
->ausers
);
1256 acl_free(sp
->agroups
);
1258 acl_free(sp
->downer
);
1259 acl_free(sp
->dgroup
);
1260 acl_free(sp
->dother
);
1261 acl_free(sp
->dusers
);
1262 acl_free(sp
->dgroups
);
1264 rw_destroy(&sp
->s_lock
);
1265 kmem_free(sp
, sizeof (si_t
));
1269 acl_cpy(ufs_ic_acl_t
*saclp
, ufs_ic_acl_t
*daclp
)
1271 ufs_ic_acl_t
*aclp
, *prev_aclp
= NULL
, *aclp1
;
1273 if (saclp
== NULL
) {
1279 for (aclp
= saclp
; aclp
!= NULL
; aclp
= aclp
->acl_ic_next
) {
1280 aclp1
= kmem_alloc(sizeof (ufs_ic_acl_t
), KM_SLEEP
);
1281 aclp1
->acl_ic_next
= NULL
;
1282 aclp1
->acl_ic_who
= aclp
->acl_ic_who
;
1283 aclp1
->acl_ic_perm
= aclp
->acl_ic_perm
;
1284 prev_aclp
->acl_ic_next
= aclp1
;
1285 prev_aclp
= (ufs_ic_acl_t
*)&aclp1
->acl_ic_next
;
1290 * ufs_si_inherit takes a parent acl structure (saclp) and the inode
1291 * of the object that is inheriting an acl and returns the inode
1292 * with the acl linked to it. It also writes the acl to disk if
1293 * it is a unique inode.
1295 * ip - pointer to inode of object inheriting the acl (contents lock)
1296 * tdp - parent inode (rw_lock and contents lock)
1297 * mode - creation modes
1298 * cr - credentials pointer
1301 ufs_si_inherit(struct inode
*ip
, struct inode
*tdp
, o_mode_t mode
, cred_t
*cr
)
1303 si_t
*tsp
, *sp
= tdp
->i_ufs_acl
;
1305 o_mode_t old_modes
, old_uid
, old_gid
;
1308 ASSERT(RW_WRITE_HELD(&ip
->i_contents
));
1309 ASSERT(RW_WRITE_HELD(&tdp
->i_rwlock
));
1310 ASSERT(RW_WRITE_HELD(&tdp
->i_contents
));
1313 * if links/symbolic links, or other invalid acl objects are copied
1314 * or moved to a directory with a default acl do not allow inheritance
1317 if (!CHECK_ACL_ALLOWED(ip
->i_mode
& IFMT
))
1320 /* lock the parent security information */
1321 rw_enter(&sp
->s_lock
, RW_READER
);
1323 ASSERT(((tdp
->i_mode
& IFMT
) == IFDIR
) ||
1324 ((tdp
->i_mode
& IFMT
) == IFATTRDIR
));
1326 mask
= ((sp
->downer
!= NULL
) ? 1 : 0) |
1327 ((sp
->dgroup
!= NULL
) ? 2 : 0) |
1328 ((sp
->dother
!= NULL
) ? 4 : 0);
1331 rw_exit(&sp
->s_lock
);
1336 rw_exit(&sp
->s_lock
);
1340 tsp
= kmem_zalloc(sizeof (si_t
), KM_SLEEP
);
1341 rw_init(&tsp
->s_lock
, NULL
, RW_DEFAULT
, NULL
);
1343 /* copy the default acls */
1345 ASSERT(RW_READ_HELD(&sp
->s_lock
));
1346 acl_cpy(sp
->downer
, (ufs_ic_acl_t
*)&tsp
->aowner
);
1347 acl_cpy(sp
->dgroup
, (ufs_ic_acl_t
*)&tsp
->agroup
);
1348 acl_cpy(sp
->dother
, (ufs_ic_acl_t
*)&tsp
->aother
);
1349 acl_cpy(sp
->dusers
, (ufs_ic_acl_t
*)&tsp
->ausers
);
1350 acl_cpy(sp
->dgroups
, (ufs_ic_acl_t
*)&tsp
->agroups
);
1351 tsp
->aclass
.acl_ismask
= sp
->dclass
.acl_ismask
;
1352 tsp
->aclass
.acl_maskbits
= sp
->dclass
.acl_maskbits
;
1355 * set the owner, group, and other values from the master
1359 MODE2ACL(tsp
->aowner
, (mode
>> 6), ip
->i_uid
);
1360 MODE2ACL(tsp
->agroup
, (mode
>> 3), ip
->i_gid
);
1361 MODE2ACL(tsp
->aother
, (mode
), 0);
1363 if (tsp
->aclass
.acl_ismask
) {
1364 tsp
->aclass
.acl_maskbits
&= mode
>> 3;
1368 /* copy default acl if necessary */
1370 if (((ip
->i_mode
& IFMT
) == IFDIR
) ||
1371 ((ip
->i_mode
& IFMT
) == IFATTRDIR
)) {
1372 acl_cpy(sp
->downer
, (ufs_ic_acl_t
*)&tsp
->downer
);
1373 acl_cpy(sp
->dgroup
, (ufs_ic_acl_t
*)&tsp
->dgroup
);
1374 acl_cpy(sp
->dother
, (ufs_ic_acl_t
*)&tsp
->dother
);
1375 acl_cpy(sp
->dusers
, (ufs_ic_acl_t
*)&tsp
->dusers
);
1376 acl_cpy(sp
->dgroups
, (ufs_ic_acl_t
*)&tsp
->dgroups
);
1377 tsp
->dclass
.acl_ismask
= sp
->dclass
.acl_ismask
;
1378 tsp
->dclass
.acl_maskbits
= sp
->dclass
.acl_maskbits
;
1381 * save the new 9 mode bits in the inode (ip->ic_smode) for
1382 * ufs_getattr. Be sure the mode can be recovered if the store
1385 old_modes
= ip
->i_mode
;
1386 old_uid
= ip
->i_uid
;
1387 old_gid
= ip
->i_gid
;
1389 * store the acl, and get back a new security anchor if
1390 * it is a duplicate.
1392 rw_exit(&sp
->s_lock
);
1393 rw_enter(&ip
->i_rwlock
, RW_WRITER
);
1396 * Suppress out of inodes messages if instructed in the
1399 ip
->i_flag
|= tdp
->i_flag
& IQUIET
;
1401 if ((error
= ufs_si_store(ip
, tsp
, 0, cr
)) != 0) {
1402 ip
->i_mode
= old_modes
;
1403 ip
->i_uid
= old_uid
;
1404 ip
->i_gid
= old_gid
;
1406 ip
->i_flag
&= ~IQUIET
;
1407 rw_exit(&ip
->i_rwlock
);
/*
 * ufs_acl_cp() makes a private in-core copy of a security info
 * structure so the caller can modify it without disturbing the
 * (shared, possibly cached) original.
 *
 * Caller must hold sp->s_lock as reader; sp must be referenced and
 * in use.  Returns a newly kmem_zalloc'd si_t whose ownership passes
 * to the caller.
 */
static si_t *
ufs_acl_cp(si_t *sp)
{
	si_t *dsp;

	ASSERT(RW_READ_HELD(&sp->s_lock));
	ASSERT(sp->s_ref && sp->s_use);

	dsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
	rw_init(&dsp->s_lock, NULL, RW_DEFAULT, NULL);

	/* Duplicate the regular (access) ACL lists. */
	acl_cpy(sp->aowner, (ufs_ic_acl_t *)&dsp->aowner);
	acl_cpy(sp->agroup, (ufs_ic_acl_t *)&dsp->agroup);
	acl_cpy(sp->aother, (ufs_ic_acl_t *)&dsp->aother);
	acl_cpy(sp->ausers, (ufs_ic_acl_t *)&dsp->ausers);
	acl_cpy(sp->agroups, (ufs_ic_acl_t *)&dsp->agroups);

	dsp->aclass.acl_ismask = sp->aclass.acl_ismask;
	dsp->aclass.acl_maskbits = sp->aclass.acl_maskbits;

	/* Duplicate the default ACL lists. */
	acl_cpy(sp->downer, (ufs_ic_acl_t *)&dsp->downer);
	acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&dsp->dgroup);
	acl_cpy(sp->dother, (ufs_ic_acl_t *)&dsp->dother);
	acl_cpy(sp->dusers, (ufs_ic_acl_t *)&dsp->dusers);
	acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&dsp->dgroups);

	dsp->dclass.acl_ismask = sp->dclass.acl_ismask;
	dsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;

	return (dsp);
}
/*
 * ufs_acl_setattr() propagates chmod/chown/chgrp changes into the
 * inode's regular ACL entries so the ACL stays consistent with the
 * newly set i_mode/i_uid/i_gid.  It works on a private copy of the
 * security info (ufs_acl_cp) and installs it via ufs_si_store().
 *
 * Caller must hold ip->i_contents as writer.
 * Returns 0 if there is nothing to do, or the result of ufs_si_store().
 */
int
ufs_acl_setattr(struct inode *ip, struct vattr *vap, cred_t *cr)
{
	si_t *sp;
	int mask = vap->va_mask;
	int error = 0;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	if (!(mask & (AT_MODE|AT_UID|AT_GID)))
		return (0);

	/*
	 * if no regular acl's, nothing to do, so let's get out
	 */
	if (!(ip->i_ufs_acl) || !(ip->i_ufs_acl->aowner))
		return (0);

	rw_enter(&ip->i_ufs_acl->s_lock, RW_READER);
	sp = ufs_acl_cp(ip->i_ufs_acl);
	ASSERT(sp != ip->i_ufs_acl);

	/*
	 * set the mask to the group permissions if a mask entry
	 * exists.  Otherwise, set the group obj bits to the group
	 * permissions.  Since non-trivial ACLs always have a mask,
	 * and the mask is the final arbiter of group permissions,
	 * setting the mask has the effect of changing the effective
	 * group permissions, even if the group_obj permissions in
	 * the ACL aren't changed.  Posix P1003.1e states that when
	 * an ACL mask exists, chmod(2) must set the acl mask (NOT the
	 * group_obj permissions) to the requested group permissions.
	 */
	if (mask & AT_MODE) {
		sp->aowner->acl_ic_perm = (o_mode_t)(ip->i_mode & 0700) >> 6;
		if (sp->aclass.acl_ismask)
			sp->aclass.acl_maskbits =
			    (o_mode_t)(ip->i_mode & 070) >> 3;
		else
			sp->agroup->acl_ic_perm =
			    (o_mode_t)(ip->i_mode & 070) >> 3;
		sp->aother->acl_ic_perm = (o_mode_t)(ip->i_mode & 07);
	}

	if (mask & AT_UID) {
		/* Caller has verified our privileges */
		sp->aowner->acl_ic_who = ip->i_uid;
	}

	if (mask & AT_GID) {
		sp->agroup->acl_ic_who = ip->i_gid;
	}

	rw_exit(&ip->i_ufs_acl->s_lock);
	error = ufs_si_store(ip, sp, 0, cr);
	return (error);
}
1505 acl_count(ufs_ic_acl_t
*p
)
1510 for (count
= 0, acl
= p
; acl
; acl
= acl
->acl_ic_next
, count
++)
/*
 * Takes as input a security structure and generates a buffer
 * with fsd's in a form which be written to the shadow inode.
 *
 * Layout: an optional FSD_ACL record (regular ACL entries) followed
 * by an optional FSD_DFACL record (default ACL entries).  Each record
 * is a struct ufs_fsd header (fsd_type, fsd_size) followed by packed
 * ufs_acl_t entries.  On success, *buf/*len describe a kmem_zalloc'd
 * buffer that the caller owns and must free.
 */
static int
ufs_sectobuf(si_t *sp, caddr_t *buf, size_t *len)
{
	size_t acl_size;
	size_t def_acl_size;
	caddr_t buffer;
	struct ufs_fsd *fsdp;
	ufs_acl_t *bufaclp;

	/*
	 * Calc size of buffer to hold all the acls
	 */
	acl_size = acl_count(sp->aowner) +		/* owner */
	    acl_count(sp->agroup) +			/* owner group */
	    acl_count(sp->aother) +			/* owner other */
	    acl_count(sp->ausers) +			/* acl list */
	    acl_count(sp->agroups);			/* group alcs */
	if (sp->aclass.acl_ismask)
		acl_size++;

	/* Convert to bytes */
	acl_size *= sizeof (ufs_acl_t);

	/* Add fsd header */
	if (acl_size)
		acl_size += 2 * sizeof (int);

	/*
	 * Calc size of buffer to hold all the default acls
	 */
	def_acl_size =
	    acl_count(sp->downer) +	/* def owner */
	    acl_count(sp->dgroup) +	/* def owner group */
	    acl_count(sp->dother) +	/* def owner other */
	    acl_count(sp->dusers) +	/* def users */
	    acl_count(sp->dgroups);	/* def group acls */
	if (sp->dclass.acl_ismask)
		def_acl_size++;

	/*
	 * Convert to bytes
	 */
	def_acl_size *= sizeof (ufs_acl_t);

	/*
	 * Add fsd header
	 */
	if (def_acl_size)
		def_acl_size += 2 * sizeof (int);

	/* Nothing to serialize: leave *buf/*len untouched. */
	if (acl_size + def_acl_size == 0)
		return (0);

	buffer = kmem_zalloc((acl_size + def_acl_size), KM_SLEEP);
	bufaclp = (ufs_acl_t *)buffer;

	if (acl_size == 0)
		goto wrtdefs;

	/* create fsd and copy acls */
	fsdp = (struct ufs_fsd *)bufaclp;
	fsdp->fsd_type = FSD_ACL;
	bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];

	/* ACL_MOVE appends one list's entries and advances bufaclp. */
	ACL_MOVE(sp->aowner, USER_OBJ, bufaclp);
	ACL_MOVE(sp->agroup, GROUP_OBJ, bufaclp);
	ACL_MOVE(sp->aother, OTHER_OBJ, bufaclp);
	ACL_MOVE(sp->ausers, USER, bufaclp);
	ACL_MOVE(sp->agroups, GROUP, bufaclp);

	if (sp->aclass.acl_ismask) {
		bufaclp->acl_tag = CLASS_OBJ;
		bufaclp->acl_who = (uid_t)sp->aclass.acl_ismask;
		bufaclp->acl_perm = (o_mode_t)sp->aclass.acl_maskbits;
		bufaclp++;
	}
	ASSERT(acl_size <= INT_MAX);
	fsdp->fsd_size = (int)acl_size;

wrtdefs:
	if (def_acl_size == 0)
		goto alldone;

	/* if defaults exist then create fsd and copy default acls */
	fsdp = (struct ufs_fsd *)bufaclp;
	fsdp->fsd_type = FSD_DFACL;
	bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];

	ACL_MOVE(sp->downer, DEF_USER_OBJ, bufaclp);
	ACL_MOVE(sp->dgroup, DEF_GROUP_OBJ, bufaclp);
	ACL_MOVE(sp->dother, DEF_OTHER_OBJ, bufaclp);
	ACL_MOVE(sp->dusers, DEF_USER, bufaclp);
	ACL_MOVE(sp->dgroups, DEF_GROUP, bufaclp);
	if (sp->dclass.acl_ismask) {
		bufaclp->acl_tag = DEF_CLASS_OBJ;
		bufaclp->acl_who = (uid_t)sp->dclass.acl_ismask;
		bufaclp->acl_perm = (o_mode_t)sp->dclass.acl_maskbits;
		bufaclp++;
	}
	ASSERT(def_acl_size <= INT_MAX);
	fsdp->fsd_size = (int)def_acl_size;

alldone:
	*buf = buffer;
	*len = acl_size + def_acl_size;

	return (0);
}
/*
 * free a shadow inode on disk and in memory
 *
 * Drops one reference on the in-core si and one link on the on-disk
 * shadow inode it names.  If the shadow inode cannot be fetched
 * (ufs_iget_alloced fails) only the in-core reference is dropped.
 * When the in-core reference count reaches zero the si is removed
 * from the cache and freed.
 *
 * Returns 0, or the errno from ufs_iget_alloced().
 */
int
ufs_si_free(si_t *sp, struct vfs *vfsp, cred_t *cr)
{
	struct inode	*sip;
	int		shadow;
	int		err = 0;
	int		refcnt;
	int		signature;

	ASSERT(vfsp);
	ASSERT(sp);

	/* Snapshot the shadow inode number under the reader lock. */
	rw_enter(&sp->s_lock, RW_READER);
	ASSERT(sp->s_shadow <= INT_MAX);
	shadow = (int)sp->s_shadow;
	ASSERT(sp->s_ref);
	rw_exit(&sp->s_lock);

	/*
	 * Decrement link count on the shadow inode,
	 * and decrement reference count on the sip.
	 */
	if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) == 0) {
		/* Lock order: shadow inode contents, then si lock. */
		rw_enter(&sip->i_contents, RW_WRITER);
		rw_enter(&sp->s_lock, RW_WRITER);
		ASSERT(sp->s_shadow == shadow);
		ASSERT(sip->i_dquot == 0);
		/* Decrement link count */
		ASSERT(sip->i_nlink > 0);
		/*
		 * bug #1264710 assertion failure below
		 */
		sp->s_use = --sip->i_nlink;
		ufs_setreclaim(sip);
		TRANS_INODE(sip->i_ufsvfs, sip);
		sip->i_flag |= ICHG | IMOD;
		sip->i_seq++;
		ITIMES_NOLOCK(sip);
		/* Dec ref counts on si referenced by this ip */
		refcnt = --sp->s_ref;
		signature = sp->s_signature;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		/*
		 * Release s_lock before calling VN_RELE
		 * (which may want to acquire i_contents).
		 */
		rw_exit(&sp->s_lock);
		rw_exit(&sip->i_contents);
		VN_RELE(ITOV(sip));
	} else {
		rw_enter(&sp->s_lock, RW_WRITER);
		/* Dec ref counts on si referenced by this ip */
		refcnt = --sp->s_ref;
		signature = sp->s_signature;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		rw_exit(&sp->s_lock);
	}

	/* Last reference gone: purge the si from the cache and free it. */
	if (refcnt == 0)
		si_cache_del(sp, signature);
	return (err);
}
/*
 * Search the si cache for an si structure by inode #.
 * Returns a locked si structure.
 *
 * Parameters:
 * ip - Ptr to an inode on this fs
 * spp - Ptr to ptr to si struct for the results, if found.
 *
 * Returns:	0 - Success (results in spp)
 *		1 - Failure (spp undefined)
 */
static int
si_cachei_get(struct inode *ip, si_t **spp)
{
	si_t	*sp;

	rw_enter(&si_cache_lock, RW_READER);
	/* Hash on shadow inode number; match on shadow # and device. */
	for (sp = si_cachei[SI_HASH(ip->i_shadow)]; sp; sp = sp->s_forw)
		if (sp->s_shadow == ip->i_shadow && sp->s_dev == ip->i_dev)
			break;

	if (sp == NULL) {
		rw_exit(&si_cache_lock);
		return (1);
	}

	/* Found it: write-lock it while the cache lock still pins it. */
	rw_enter(&sp->s_lock, RW_WRITER);
	*spp = sp;
	rw_exit(&si_cache_lock);
	return (0);
}
/*
 * Search the si cache by si structure (ie duplicate of the one passed in).
 * In order for a match the signatures must be the same and
 * the devices must be the same, the acls must match and
 * link count of the cached shadow must be less than the
 * size of ic_nlink - 1.  MAXLINK - 1 is used to allow the count
 * to be incremented one more time by the caller.
 * Returns a locked si structure.
 *
 * Parameters:
 * ip - Ptr to an inode on this fs
 * spi - Ptr to si the struct we're searching the cache for.
 * spp - Ptr to ptr to si struct for the results, if found.
 *
 * Returns:	0 - Success (results in spp)
 *		1 - Failure (spp undefined)
 */
static int
si_cachea_get(struct inode *ip, si_t *spi, si_t **spp)
{
	si_t	*sp;

	/* Stamp the probe with this fs's device and its content signature. */
	spi->s_dev = ip->i_dev;
	spi->s_signature = si_signature(spi);
	rw_enter(&si_cache_lock, RW_READER);

	for (sp = si_cachea[SI_HASH(spi->s_signature)]; sp; sp = sp->s_next) {
		if (sp->s_signature == spi->s_signature &&
		    sp->s_dev == spi->s_dev &&
		    sp->s_use > 0 &&			/* deleting */
		    sp->s_use <= (MAXLINK - 1) &&	/* Too many links */
		    !si_cmp(sp, spi))
			break;
	}

	if (sp == NULL) {
		rw_exit(&si_cache_lock);
		return (1);
	}

	/* Found a shareable duplicate: return it write-locked. */
	rw_enter(&sp->s_lock, RW_WRITER);
	*spp = sp;
	spi->s_shadow = sp->s_shadow; /* XXX For debugging */
	rw_exit(&si_cache_lock);
	return (0);
}
/*
 * Place an si structure in the si cache.  May cause duplicates.
 *
 * The structure is pushed onto the front of both hash chains:
 * the 'by acl' chain (hashed on signature) and the 'by inode'
 * chain (hashed on shadow inode number).
 *
 * Parameters:
 * sp - Ptr to the si struct to add to the cache.
 *
 * Returns: Nothing (void)
 */
static void
si_cache_put(si_t *sp)
{
	si_t	**tspp;

	ASSERT(sp->s_fore == NULL);
	rw_enter(&si_cache_lock, RW_WRITER);
	if (!sp->s_signature)
		sp->s_signature = si_signature(sp);
	sp->s_flags |= SI_CACHED;
	sp->s_fore = NULL;

	/* The 'by acl' chains */
	tspp = &si_cachea[SI_HASH(sp->s_signature)];
	sp->s_next = *tspp;
	*tspp = sp;

	/* The 'by inode' chains */
	tspp = &si_cachei[SI_HASH(sp->s_shadow)];
	sp->s_forw = *tspp;
	*tspp = sp;

	rw_exit(&si_cache_lock);
}
/*
 * The sp passed in is a candidate for deletion from the cache.  We acquire
 * the cache lock first, so no cache searches can be done.  Then we search
 * for the acl in the cache, and if we find it we can lock it and check that
 * nobody else attached to it while we were acquiring the locks.  If the acl
 * is in the cache and still has a zero reference count, then we remove it
 * from the cache and deallocate it.  If the reference count is non-zero or
 * it is not found in the cache, then someone else attached to it or has
 * already freed it, so we just return.
 *
 * Parameters:
 * sp - Ptr to the sp struct which is the candicate for deletion.
 * signature - the signature for the acl for lookup in the hash table
 *
 * Returns: Nothing (void)
 */
void
si_cache_del(si_t *sp, int signature)
{
	si_t	**tspp;
	int	hash;
	int	foundacl = 0;

	/*
	 * Unlink & free the sp from the other queues, then destroy it.
	 * Search the 'by acl' chain first, then the 'by inode' chain
	 * after the acl is locked.
	 */
	rw_enter(&si_cache_lock, RW_WRITER);
	hash = SI_HASH(signature);
	for (tspp = &si_cachea[hash]; *tspp; tspp = &(*tspp)->s_next) {
		if (*tspp == sp) {
			/*
			 * Wait to grab the acl lock until after the acl has
			 * been found in the cache.  Otherwise it might try to
			 * grab a lock that has already been destroyed, or
			 * delete an acl that has already been freed.
			 */
			rw_enter(&sp->s_lock, RW_WRITER);
			/* See if someone else attached to it */
			if (sp->s_ref) {
				rw_exit(&sp->s_lock);
				rw_exit(&si_cache_lock);
				return;
			}
			ASSERT(sp->s_fore == NULL);
			ASSERT(sp->s_flags & SI_CACHED);
			foundacl = 1;
			*tspp = sp->s_next;
			break;
		}
	}

	/*
	 * If the acl was not in the cache, we assume another thread has
	 * deleted it already. This could happen if another thread attaches to
	 * the acl and then releases it after this thread has already found the
	 * reference count to be zero but has not yet taken the cache lock.
	 * Both threads end up seeing a reference count of zero, and call into
	 * si_cache_del.  See bug 4244827 for details on the race condition.
	 */
	if (foundacl == 0) {
		rw_exit(&si_cache_lock);
		return;
	}

	/* Now check the 'by inode' chain */
	hash = SI_HASH(sp->s_shadow);
	for (tspp = &si_cachei[hash]; *tspp; tspp = &(*tspp)->s_forw) {
		if (*tspp == sp) {
			*tspp = sp->s_forw;
			break;
		}
	}

	/*
	 * At this point, we can unlock everything because this si
	 * is no longer in the cache, thus cannot be attached to.
	 */
	rw_exit(&sp->s_lock);
	rw_exit(&si_cache_lock);
	sp->s_flags &= ~SI_CACHED;
	(void) ufs_si_free_mem(sp);
}
1897 * Alloc the hash buckets for the si cache & initialize
1898 * the unreferenced anchor and the cache lock.
1903 rw_init(&si_cache_lock
, NULL
, RW_DEFAULT
, NULL
);
1905 /* The 'by acl' headers */
1906 si_cachea
= kmem_zalloc(si_cachecnt
* sizeof (si_t
*), KM_SLEEP
);
1907 /* The 'by inode' headers */
1908 si_cachei
= kmem_zalloc(si_cachecnt
* sizeof (si_t
*), KM_SLEEP
);
1912 * aclcksum takes an acl and generates a checksum. It takes as input
1913 * the acl to start at.
1915 * s_aclp - pointer to starting acl
1920 aclcksum(ufs_ic_acl_t
*s_aclp
)
1924 for (aclp
= s_aclp
; aclp
; aclp
= aclp
->acl_ic_next
) {
1925 signature
+= aclp
->acl_ic_perm
;
1926 signature
+= aclp
->acl_ic_who
;
1932 * Generate a unique signature for an si structure. Used by the
1933 * search routine si_cachea_get() to quickly identify candidates
1934 * prior to calling si_cmp().
1936 * sp - Ptr to the si struct to generate the signature for.
1938 * Returns: A signature for the si struct (really a checksum)
1941 si_signature(si_t
*sp
)
1943 int signature
= sp
->s_dev
;
1945 signature
+= aclcksum(sp
->aowner
) + aclcksum(sp
->agroup
) +
1946 aclcksum(sp
->aother
) + aclcksum(sp
->ausers
) +
1947 aclcksum(sp
->agroups
) + aclcksum(sp
->downer
) +
1948 aclcksum(sp
->dgroup
) + aclcksum(sp
->dother
) +
1949 aclcksum(sp
->dusers
) + aclcksum(sp
->dgroups
);
1950 if (sp
->aclass
.acl_ismask
)
1951 signature
+= sp
->aclass
.acl_maskbits
;
1952 if (sp
->dclass
.acl_ismask
)
1953 signature
+= sp
->dclass
.acl_maskbits
;
1959 * aclcmp compares to acls to see if they are identical.
1964 * returns 0 if equal and 1 if not equal
1967 aclcmp(ufs_ic_acl_t
*aclin1p
, ufs_ic_acl_t
*aclin2p
)
1969 ufs_ic_acl_t
*aclp1
;
1970 ufs_ic_acl_t
*aclp2
;
1973 * if the starting pointers are equal then they are equal so
1976 if (aclin1p
== aclin2p
)
1979 * check element by element
1981 for (aclp1
= aclin1p
, aclp2
= aclin2p
; aclp1
&& aclp2
;
1982 aclp1
= aclp1
->acl_ic_next
, aclp2
= aclp2
->acl_ic_next
) {
1983 if (aclp1
->acl_ic_perm
!= aclp2
->acl_ic_perm
||
1984 aclp1
->acl_ic_who
!= aclp2
->acl_ic_who
)
1988 * both must be zero (at the end of the acl)
1997 * Do extensive, field-by-field compare of two si structures. Returns
1998 * 0 if they are exactly identical, 1 otherwise.
2001 * sp1 - Ptr to 1st si struct
2002 * sp2 - Ptr to 2nd si struct
2009 si_cmp(si_t
*sp1
, si_t
*sp2
)
2011 if (sp1
->s_dev
!= sp2
->s_dev
)
2013 if (aclcmp(sp1
->aowner
, sp2
->aowner
) ||
2014 aclcmp(sp1
->agroup
, sp2
->agroup
) ||
2015 aclcmp(sp1
->aother
, sp2
->aother
) ||
2016 aclcmp(sp1
->ausers
, sp2
->ausers
) ||
2017 aclcmp(sp1
->agroups
, sp2
->agroups
) ||
2018 aclcmp(sp1
->downer
, sp2
->downer
) ||
2019 aclcmp(sp1
->dgroup
, sp2
->dgroup
) ||
2020 aclcmp(sp1
->dother
, sp2
->dother
) ||
2021 aclcmp(sp1
->dusers
, sp2
->dusers
) ||
2022 aclcmp(sp1
->dgroups
, sp2
->dgroups
))
2024 if (sp1
->aclass
.acl_ismask
!= sp2
->aclass
.acl_ismask
)
2026 if (sp1
->dclass
.acl_ismask
!= sp2
->dclass
.acl_ismask
)
2028 if (sp1
->aclass
.acl_ismask
&&
2029 sp1
->aclass
.acl_maskbits
!= sp2
->aclass
.acl_maskbits
)
2031 if (sp1
->dclass
.acl_ismask
&&
2032 sp1
->dclass
.acl_maskbits
!= sp2
->dclass
.acl_maskbits
)
/*
 * Remove all acls associated with a device.  All acls must have
 * a reference count of zero.
 *
 * Pass 1 unlinks matching entries from the 'by acl' chains only;
 * pass 2 unlinks them from the 'by inode' chains and frees them
 * (each si lives on both chains, so it is freed exactly once).
 *
 * Parameters:
 * device - device to remove from the cache
 */
void
ufs_si_cache_flush(dev_t dev)
{
	si_t	*tsp;
	si_t	**tspp;
	int	i;

	rw_enter(&si_cache_lock, RW_WRITER);
	for (i = 0; i < si_cachecnt; i++) {
		tspp = &si_cachea[i];
		while (*tspp) {
			if ((*tspp)->s_dev == dev) {
				*tspp = (*tspp)->s_next;
			} else {
				tspp = &(*tspp)->s_next;
			}
		}
	}
	for (i = 0; i < si_cachecnt; i++) {
		tspp = &si_cachei[i];
		while (*tspp) {
			if ((*tspp)->s_dev == dev) {
				tsp = *tspp;
				*tspp = (*tspp)->s_forw;
				tsp->s_flags &= ~SI_CACHED;
				ufs_si_free_mem(tsp);
			} else {
				tspp = &(*tspp)->s_forw;
			}
		}
	}
	rw_exit(&si_cache_lock);
}
2082 * ufs_si_del is used to unhook a sp from a inode in memory
2084 * ip is the inode to remove the sp from.
2087 ufs_si_del(struct inode
*ip
)
2089 si_t
*sp
= ip
->i_ufs_acl
;
2094 rw_enter(&sp
->s_lock
, RW_WRITER
);
2095 refcnt
= --sp
->s_ref
;
2096 signature
= sp
->s_signature
;
2097 ASSERT(sp
->s_ref
>= 0 && sp
->s_ref
<= sp
->s_use
);
2098 rw_exit(&sp
->s_lock
);
2100 si_cache_del(sp
, signature
);
2101 ip
->i_ufs_acl
= NULL
;