4 * Copyright (C) International Business Machines Corp., 2007,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Contains the routines for mapping CIFS/NTFS ACLs
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/keyctl.h>
28 #include <linux/key-type.h>
29 #include <keys/user-type.h>
33 #include "cifsproto.h"
34 #include "cifs_debug.h"
36 /* security id for everyone/world system group */
37 static const struct cifs_sid sid_everyone
= {
38 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
39 /* security id for Authenticated Users system group */
40 static const struct cifs_sid sid_authusers
= {
41 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
43 static const struct cifs_sid sid_user
= {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
/*
 * Credentials used with override_creds() while upcalling to userspace
 * (request_key) for SID <-> id mapping; its thread keyring is revoked
 * in exit_cifs_idmap().
 */
const struct cred *root_cred;
48 shrink_idmap_tree(struct rb_root
*root
, int nr_to_scan
, int *nr_rem
,
53 struct cifs_sid_id
*psidid
;
55 node
= rb_first(root
);
59 psidid
= rb_entry(tmp
, struct cifs_sid_id
, rbnode
);
60 if (nr_to_scan
== 0 || *nr_del
== nr_to_scan
)
63 if (time_after(jiffies
, psidid
->time
+ SID_MAP_EXPIRE
)
64 && psidid
->refcount
== 0) {
74 * Run idmap cache shrinker.
77 cifs_idmap_shrinker(struct shrinker
*shrink
, struct shrink_control
*sc
)
79 int nr_to_scan
= sc
->nr_to_scan
;
85 spin_lock(&siduidlock
);
86 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
87 spin_unlock(&siduidlock
);
90 spin_lock(&sidgidlock
);
91 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
92 spin_unlock(&sidgidlock
);
95 spin_lock(&uidsidlock
);
96 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
97 spin_unlock(&uidsidlock
);
100 spin_lock(&gidsidlock
);
101 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
102 spin_unlock(&gidsidlock
);
108 sid_rb_insert(struct rb_root
*root
, unsigned long cid
,
109 struct cifs_sid_id
**psidid
, char *typestr
)
112 struct rb_node
*node
= root
->rb_node
;
113 struct rb_node
*parent
= NULL
;
114 struct rb_node
**linkto
= &(root
->rb_node
);
115 struct cifs_sid_id
*lsidid
;
118 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
120 if (cid
> lsidid
->id
) {
121 linkto
= &(node
->rb_left
);
122 node
= node
->rb_left
;
124 if (cid
< lsidid
->id
) {
125 linkto
= &(node
->rb_right
);
126 node
= node
->rb_right
;
131 (*psidid
)->time
= jiffies
- (SID_MAP_RETRY
+ 1);
132 (*psidid
)->refcount
= 0;
134 sprintf((*psidid
)->sidstr
, "%s", typestr
);
135 strptr
= (*psidid
)->sidstr
+ strlen((*psidid
)->sidstr
);
136 sprintf(strptr
, "%ld", cid
);
138 clear_bit(SID_ID_PENDING
, &(*psidid
)->state
);
139 clear_bit(SID_ID_MAPPED
, &(*psidid
)->state
);
141 rb_link_node(&(*psidid
)->rbnode
, parent
, linkto
);
142 rb_insert_color(&(*psidid
)->rbnode
, root
);
145 static struct cifs_sid_id
*
146 sid_rb_search(struct rb_root
*root
, unsigned long cid
)
148 struct rb_node
*node
= root
->rb_node
;
149 struct cifs_sid_id
*lsidid
;
152 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
153 if (cid
> lsidid
->id
)
154 node
= node
->rb_left
;
155 else if (cid
< lsidid
->id
)
156 node
= node
->rb_right
;
157 else /* node found */
164 static struct shrinker cifs_shrinker
= {
165 .shrink
= cifs_idmap_shrinker
,
166 .seeks
= DEFAULT_SEEKS
,
170 cifs_idmap_key_instantiate(struct key
*key
, const void *data
, size_t datalen
)
174 payload
= kmalloc(datalen
, GFP_KERNEL
);
178 memcpy(payload
, data
, datalen
);
179 key
->payload
.data
= payload
;
180 key
->datalen
= datalen
;
185 cifs_idmap_key_destroy(struct key
*key
)
187 kfree(key
->payload
.data
);
190 struct key_type cifs_idmap_key_type
= {
191 .name
= "cifs.idmap",
192 .instantiate
= cifs_idmap_key_instantiate
,
193 .destroy
= cifs_idmap_key_destroy
,
194 .describe
= user_describe
,
199 sid_to_str(struct cifs_sid
*sidptr
, char *sidstr
)
207 sprintf(strptr
, "%s", "S");
208 strptr
= sidstr
+ strlen(sidstr
);
210 sprintf(strptr
, "-%d", sidptr
->revision
);
211 strptr
= sidstr
+ strlen(sidstr
);
213 for (i
= 0; i
< 6; ++i
) {
214 if (sidptr
->authority
[i
]) {
215 sprintf(strptr
, "-%d", sidptr
->authority
[i
]);
216 strptr
= sidstr
+ strlen(sidstr
);
220 for (i
= 0; i
< sidptr
->num_subauth
; ++i
) {
221 saval
= le32_to_cpu(sidptr
->sub_auth
[i
]);
222 sprintf(strptr
, "-%ld", saval
);
223 strptr
= sidstr
+ strlen(sidstr
);
228 id_rb_insert(struct rb_root
*root
, struct cifs_sid
*sidptr
,
229 struct cifs_sid_id
**psidid
, char *typestr
)
233 struct rb_node
*node
= root
->rb_node
;
234 struct rb_node
*parent
= NULL
;
235 struct rb_node
**linkto
= &(root
->rb_node
);
236 struct cifs_sid_id
*lsidid
;
239 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
241 rc
= compare_sids(sidptr
, &((lsidid
)->sid
));
243 linkto
= &(node
->rb_left
);
244 node
= node
->rb_left
;
246 linkto
= &(node
->rb_right
);
247 node
= node
->rb_right
;
251 memcpy(&(*psidid
)->sid
, sidptr
, sizeof(struct cifs_sid
));
252 (*psidid
)->time
= jiffies
- (SID_MAP_RETRY
+ 1);
253 (*psidid
)->refcount
= 0;
255 sprintf((*psidid
)->sidstr
, "%s", typestr
);
256 strptr
= (*psidid
)->sidstr
+ strlen((*psidid
)->sidstr
);
257 sid_to_str(&(*psidid
)->sid
, strptr
);
259 clear_bit(SID_ID_PENDING
, &(*psidid
)->state
);
260 clear_bit(SID_ID_MAPPED
, &(*psidid
)->state
);
262 rb_link_node(&(*psidid
)->rbnode
, parent
, linkto
);
263 rb_insert_color(&(*psidid
)->rbnode
, root
);
266 static struct cifs_sid_id
*
267 id_rb_search(struct rb_root
*root
, struct cifs_sid
*sidptr
)
270 struct rb_node
*node
= root
->rb_node
;
271 struct cifs_sid_id
*lsidid
;
274 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
275 rc
= compare_sids(sidptr
, &((lsidid
)->sid
));
277 node
= node
->rb_left
;
279 node
= node
->rb_right
;
280 } else /* node found */
288 sidid_pending_wait(void *unused
)
291 return signal_pending(current
) ? -ERESTARTSYS
: 0;
295 id_to_sid(unsigned long cid
, uint sidtype
, struct cifs_sid
*ssid
)
299 const struct cred
*saved_cred
;
300 struct cifs_sid
*lsid
;
301 struct cifs_sid_id
*psidid
, *npsidid
;
302 struct rb_root
*cidtree
;
305 if (sidtype
== SIDOWNER
) {
306 cidlock
= &siduidlock
;
308 } else if (sidtype
== SIDGROUP
) {
309 cidlock
= &sidgidlock
;
315 psidid
= sid_rb_search(cidtree
, cid
);
317 if (!psidid
) { /* node does not exist, allocate one & attempt adding */
318 spin_unlock(cidlock
);
319 npsidid
= kzalloc(sizeof(struct cifs_sid_id
), GFP_KERNEL
);
323 npsidid
->sidstr
= kmalloc(SIDLEN
, GFP_KERNEL
);
324 if (!npsidid
->sidstr
) {
330 psidid
= sid_rb_search(cidtree
, cid
);
331 if (psidid
) { /* node happened to get inserted meanwhile */
333 spin_unlock(cidlock
);
334 kfree(npsidid
->sidstr
);
338 sid_rb_insert(cidtree
, cid
, &psidid
,
339 sidtype
== SIDOWNER
? "oi:" : "gi:");
341 spin_unlock(cidlock
);
345 spin_unlock(cidlock
);
349 * If we are here, it is safe to access psidid and its fields
350 * since a reference was taken earlier while holding the spinlock.
351 * A reference on the node is put without holding the spinlock
352 * and it is OK to do so in this case, shrinker will not erase
353 * this node until all references are put and we do not access
354 * any fields of the node after a reference is put .
356 if (test_bit(SID_ID_MAPPED
, &psidid
->state
)) {
357 memcpy(ssid
, &psidid
->sid
, sizeof(struct cifs_sid
));
358 psidid
->time
= jiffies
; /* update ts for accessing */
362 if (time_after(psidid
->time
+ SID_MAP_RETRY
, jiffies
)) {
367 if (!test_and_set_bit(SID_ID_PENDING
, &psidid
->state
)) {
368 saved_cred
= override_creds(root_cred
);
369 sidkey
= request_key(&cifs_idmap_key_type
, psidid
->sidstr
, "");
370 if (IS_ERR(sidkey
)) {
372 cFYI(1, "%s: Can't map and id to a SID", __func__
);
374 lsid
= (struct cifs_sid
*)sidkey
->payload
.data
;
375 memcpy(&psidid
->sid
, lsid
,
376 sidkey
->datalen
< sizeof(struct cifs_sid
) ?
377 sidkey
->datalen
: sizeof(struct cifs_sid
));
378 memcpy(ssid
, &psidid
->sid
,
379 sidkey
->datalen
< sizeof(struct cifs_sid
) ?
380 sidkey
->datalen
: sizeof(struct cifs_sid
));
381 set_bit(SID_ID_MAPPED
, &psidid
->state
);
383 kfree(psidid
->sidstr
);
385 psidid
->time
= jiffies
; /* update ts for accessing */
386 revert_creds(saved_cred
);
387 clear_bit(SID_ID_PENDING
, &psidid
->state
);
388 wake_up_bit(&psidid
->state
, SID_ID_PENDING
);
390 rc
= wait_on_bit(&psidid
->state
, SID_ID_PENDING
,
391 sidid_pending_wait
, TASK_INTERRUPTIBLE
);
393 cFYI(1, "%s: sidid_pending_wait interrupted %d",
398 if (test_bit(SID_ID_MAPPED
, &psidid
->state
))
399 memcpy(ssid
, &psidid
->sid
, sizeof(struct cifs_sid
));
409 sid_to_id(struct cifs_sb_info
*cifs_sb
, struct cifs_sid
*psid
,
410 struct cifs_fattr
*fattr
, uint sidtype
)
415 const struct cred
*saved_cred
;
416 struct cifs_sid_id
*psidid
, *npsidid
;
417 struct rb_root
*cidtree
;
420 if (sidtype
== SIDOWNER
) {
421 cid
= cifs_sb
->mnt_uid
; /* default uid, in case upcall fails */
422 cidlock
= &siduidlock
;
424 } else if (sidtype
== SIDGROUP
) {
425 cid
= cifs_sb
->mnt_gid
; /* default gid, in case upcall fails */
426 cidlock
= &sidgidlock
;
432 psidid
= id_rb_search(cidtree
, psid
);
434 if (!psidid
) { /* node does not exist, allocate one & attempt adding */
435 spin_unlock(cidlock
);
436 npsidid
= kzalloc(sizeof(struct cifs_sid_id
), GFP_KERNEL
);
440 npsidid
->sidstr
= kmalloc(SIDLEN
, GFP_KERNEL
);
441 if (!npsidid
->sidstr
) {
447 psidid
= id_rb_search(cidtree
, psid
);
448 if (psidid
) { /* node happened to get inserted meanwhile */
450 spin_unlock(cidlock
);
451 kfree(npsidid
->sidstr
);
455 id_rb_insert(cidtree
, psid
, &psidid
,
456 sidtype
== SIDOWNER
? "os:" : "gs:");
458 spin_unlock(cidlock
);
462 spin_unlock(cidlock
);
466 * If we are here, it is safe to access psidid and its fields
467 * since a reference was taken earlier while holding the spinlock.
468 * A reference on the node is put without holding the spinlock
469 * and it is OK to do so in this case, shrinker will not erase
470 * this node until all references are put and we do not access
471 * any fields of the node after a reference is put .
473 if (test_bit(SID_ID_MAPPED
, &psidid
->state
)) {
475 psidid
->time
= jiffies
; /* update ts for accessing */
479 if (time_after(psidid
->time
+ SID_MAP_RETRY
, jiffies
))
482 if (!test_and_set_bit(SID_ID_PENDING
, &psidid
->state
)) {
483 saved_cred
= override_creds(root_cred
);
484 idkey
= request_key(&cifs_idmap_key_type
, psidid
->sidstr
, "");
486 cFYI(1, "%s: Can't map SID to an id", __func__
);
488 cid
= *(unsigned long *)idkey
->payload
.value
;
490 set_bit(SID_ID_MAPPED
, &psidid
->state
);
492 kfree(psidid
->sidstr
);
494 revert_creds(saved_cred
);
495 psidid
->time
= jiffies
; /* update ts for accessing */
496 clear_bit(SID_ID_PENDING
, &psidid
->state
);
497 wake_up_bit(&psidid
->state
, SID_ID_PENDING
);
499 rc
= wait_on_bit(&psidid
->state
, SID_ID_PENDING
,
500 sidid_pending_wait
, TASK_INTERRUPTIBLE
);
502 cFYI(1, "%s: sidid_pending_wait interrupted %d",
504 --psidid
->refcount
; /* decremented without spinlock */
507 if (test_bit(SID_ID_MAPPED
, &psidid
->state
))
512 --psidid
->refcount
; /* decremented without spinlock */
513 if (sidtype
== SIDOWNER
)
522 init_cifs_idmap(void)
528 cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type
.name
);
530 /* create an override credential set with a special thread keyring in
531 * which requests are cached
533 * this is used to prevent malicious redirections from being installed
536 cred
= prepare_kernel_cred(NULL
);
540 keyring
= key_alloc(&key_type_keyring
, ".cifs_idmap", 0, 0, cred
,
541 (KEY_POS_ALL
& ~KEY_POS_SETATTR
) |
542 KEY_USR_VIEW
| KEY_USR_READ
,
543 KEY_ALLOC_NOT_IN_QUOTA
);
544 if (IS_ERR(keyring
)) {
545 ret
= PTR_ERR(keyring
);
546 goto failed_put_cred
;
549 ret
= key_instantiate_and_link(keyring
, NULL
, 0, NULL
, NULL
);
553 ret
= register_key_type(&cifs_idmap_key_type
);
557 /* instruct request_key() to use this special keyring as a cache for
558 * the results it looks up */
559 cred
->thread_keyring
= keyring
;
560 cred
->jit_keyring
= KEY_REQKEY_DEFL_THREAD_KEYRING
;
563 spin_lock_init(&siduidlock
);
565 spin_lock_init(&sidgidlock
);
568 spin_lock_init(&uidsidlock
);
569 siduidtree
= RB_ROOT
;
570 spin_lock_init(&gidsidlock
);
571 sidgidtree
= RB_ROOT
;
572 register_shrinker(&cifs_shrinker
);
574 cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring
));
585 exit_cifs_idmap(void)
587 key_revoke(root_cred
->thread_keyring
);
588 unregister_key_type(&cifs_idmap_key_type
);
590 unregister_shrinker(&cifs_shrinker
);
591 cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type
.name
);
595 cifs_destroy_idmaptrees(void)
597 struct rb_root
*root
;
598 struct rb_node
*node
;
601 spin_lock(&siduidlock
);
602 while ((node
= rb_first(root
)))
603 rb_erase(node
, root
);
604 spin_unlock(&siduidlock
);
607 spin_lock(&sidgidlock
);
608 while ((node
= rb_first(root
)))
609 rb_erase(node
, root
);
610 spin_unlock(&sidgidlock
);
613 spin_lock(&uidsidlock
);
614 while ((node
= rb_first(root
)))
615 rb_erase(node
, root
);
616 spin_unlock(&uidsidlock
);
619 spin_lock(&gidsidlock
);
620 while ((node
= rb_first(root
)))
621 rb_erase(node
, root
);
622 spin_unlock(&gidsidlock
);
/* Compare two SIDs (roughly equivalent to UUIDs for users or groups):
   returns 0 when they match, and a non-zero value when they do not */
627 int compare_sids(const struct cifs_sid
*ctsid
, const struct cifs_sid
*cwsid
)
630 int num_subauth
, num_sat
, num_saw
;
632 if ((!ctsid
) || (!cwsid
))
635 /* compare the revision */
636 if (ctsid
->revision
!= cwsid
->revision
) {
637 if (ctsid
->revision
> cwsid
->revision
)
643 /* compare all of the six auth values */
644 for (i
= 0; i
< 6; ++i
) {
645 if (ctsid
->authority
[i
] != cwsid
->authority
[i
]) {
646 if (ctsid
->authority
[i
] > cwsid
->authority
[i
])
653 /* compare all of the subauth values if any */
654 num_sat
= ctsid
->num_subauth
;
655 num_saw
= cwsid
->num_subauth
;
656 num_subauth
= num_sat
< num_saw
? num_sat
: num_saw
;
658 for (i
= 0; i
< num_subauth
; ++i
) {
659 if (ctsid
->sub_auth
[i
] != cwsid
->sub_auth
[i
]) {
660 if (le32_to_cpu(ctsid
->sub_auth
[i
]) >
661 le32_to_cpu(cwsid
->sub_auth
[i
]))
669 return 0; /* sids compare/match */
673 /* copy ntsd, owner sid, and group sid from a security descriptor to another */
674 static void copy_sec_desc(const struct cifs_ntsd
*pntsd
,
675 struct cifs_ntsd
*pnntsd
, __u32 sidsoffset
)
679 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
680 struct cifs_sid
*nowner_sid_ptr
, *ngroup_sid_ptr
;
682 /* copy security descriptor control portion */
683 pnntsd
->revision
= pntsd
->revision
;
684 pnntsd
->type
= pntsd
->type
;
685 pnntsd
->dacloffset
= cpu_to_le32(sizeof(struct cifs_ntsd
));
686 pnntsd
->sacloffset
= 0;
687 pnntsd
->osidoffset
= cpu_to_le32(sidsoffset
);
688 pnntsd
->gsidoffset
= cpu_to_le32(sidsoffset
+ sizeof(struct cifs_sid
));
691 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
692 le32_to_cpu(pntsd
->osidoffset
));
693 nowner_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+ sidsoffset
);
695 nowner_sid_ptr
->revision
= owner_sid_ptr
->revision
;
696 nowner_sid_ptr
->num_subauth
= owner_sid_ptr
->num_subauth
;
697 for (i
= 0; i
< 6; i
++)
698 nowner_sid_ptr
->authority
[i
] = owner_sid_ptr
->authority
[i
];
699 for (i
= 0; i
< 5; i
++)
700 nowner_sid_ptr
->sub_auth
[i
] = owner_sid_ptr
->sub_auth
[i
];
703 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
704 le32_to_cpu(pntsd
->gsidoffset
));
705 ngroup_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+ sidsoffset
+
706 sizeof(struct cifs_sid
));
708 ngroup_sid_ptr
->revision
= group_sid_ptr
->revision
;
709 ngroup_sid_ptr
->num_subauth
= group_sid_ptr
->num_subauth
;
710 for (i
= 0; i
< 6; i
++)
711 ngroup_sid_ptr
->authority
[i
] = group_sid_ptr
->authority
[i
];
712 for (i
= 0; i
< 5; i
++)
713 ngroup_sid_ptr
->sub_auth
[i
] = group_sid_ptr
->sub_auth
[i
];
720 change posix mode to reflect permissions
721 pmode is the existing mode (we only want to overwrite part of this
722 bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
724 static void access_flags_to_mode(__le32 ace_flags
, int type
, umode_t
*pmode
,
725 umode_t
*pbits_to_set
)
727 __u32 flags
= le32_to_cpu(ace_flags
);
728 /* the order of ACEs is important. The canonical order is to begin with
729 DENY entries followed by ALLOW, otherwise an allow entry could be
730 encountered first, making the subsequent deny entry like "dead code"
which would be superfluous since Windows stops when a match is made
732 for the operation you are trying to perform for your user */
734 /* For deny ACEs we change the mask so that subsequent allow access
735 control entries do not turn on the bits we are denying */
736 if (type
== ACCESS_DENIED
) {
737 if (flags
& GENERIC_ALL
)
738 *pbits_to_set
&= ~S_IRWXUGO
;
740 if ((flags
& GENERIC_WRITE
) ||
741 ((flags
& FILE_WRITE_RIGHTS
) == FILE_WRITE_RIGHTS
))
742 *pbits_to_set
&= ~S_IWUGO
;
743 if ((flags
& GENERIC_READ
) ||
744 ((flags
& FILE_READ_RIGHTS
) == FILE_READ_RIGHTS
))
745 *pbits_to_set
&= ~S_IRUGO
;
746 if ((flags
& GENERIC_EXECUTE
) ||
747 ((flags
& FILE_EXEC_RIGHTS
) == FILE_EXEC_RIGHTS
))
748 *pbits_to_set
&= ~S_IXUGO
;
750 } else if (type
!= ACCESS_ALLOWED
) {
751 cERROR(1, "unknown access control type %d", type
);
754 /* else ACCESS_ALLOWED type */
756 if (flags
& GENERIC_ALL
) {
757 *pmode
|= (S_IRWXUGO
& (*pbits_to_set
));
758 cFYI(DBG2
, "all perms");
761 if ((flags
& GENERIC_WRITE
) ||
762 ((flags
& FILE_WRITE_RIGHTS
) == FILE_WRITE_RIGHTS
))
763 *pmode
|= (S_IWUGO
& (*pbits_to_set
));
764 if ((flags
& GENERIC_READ
) ||
765 ((flags
& FILE_READ_RIGHTS
) == FILE_READ_RIGHTS
))
766 *pmode
|= (S_IRUGO
& (*pbits_to_set
));
767 if ((flags
& GENERIC_EXECUTE
) ||
768 ((flags
& FILE_EXEC_RIGHTS
) == FILE_EXEC_RIGHTS
))
769 *pmode
|= (S_IXUGO
& (*pbits_to_set
));
771 cFYI(DBG2
, "access flags 0x%x mode now 0x%x", flags
, *pmode
);
776 Generate access flags to reflect permissions mode is the existing mode.
777 This function is called for every ACE in the DACL whose SID matches
778 with either owner or group or everyone.
781 static void mode_to_access_flags(umode_t mode
, umode_t bits_to_use
,
784 /* reset access mask */
787 /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
790 /* check for R/W/X UGO since we do not know whose flags
791 is this but we have cleared all the bits sans RWX for
792 either user or group or other as per bits_to_use */
794 *pace_flags
|= SET_FILE_READ_RIGHTS
;
796 *pace_flags
|= SET_FILE_WRITE_RIGHTS
;
798 *pace_flags
|= SET_FILE_EXEC_RIGHTS
;
800 cFYI(DBG2
, "mode: 0x%x, access flags now 0x%x", mode
, *pace_flags
);
804 static __u16
fill_ace_for_sid(struct cifs_ace
*pntace
,
805 const struct cifs_sid
*psid
, __u64 nmode
, umode_t bits
)
809 __u32 access_req
= 0;
811 pntace
->type
= ACCESS_ALLOWED
;
813 mode_to_access_flags(nmode
, bits
, &access_req
);
815 access_req
= SET_MINIMUM_RIGHTS
;
816 pntace
->access_req
= cpu_to_le32(access_req
);
818 pntace
->sid
.revision
= psid
->revision
;
819 pntace
->sid
.num_subauth
= psid
->num_subauth
;
820 for (i
= 0; i
< 6; i
++)
821 pntace
->sid
.authority
[i
] = psid
->authority
[i
];
822 for (i
= 0; i
< psid
->num_subauth
; i
++)
823 pntace
->sid
.sub_auth
[i
] = psid
->sub_auth
[i
];
825 size
= 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid
->num_subauth
* 4);
826 pntace
->size
= cpu_to_le16(size
);
832 #ifdef CONFIG_CIFS_DEBUG2
833 static void dump_ace(struct cifs_ace
*pace
, char *end_of_acl
)
837 /* validate that we do not go past end of acl */
839 if (le16_to_cpu(pace
->size
) < 16) {
840 cERROR(1, "ACE too small %d", le16_to_cpu(pace
->size
));
844 if (end_of_acl
< (char *)pace
+ le16_to_cpu(pace
->size
)) {
845 cERROR(1, "ACL too small to parse ACE");
849 num_subauth
= pace
->sid
.num_subauth
;
852 cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
853 pace
->sid
.revision
, pace
->sid
.num_subauth
, pace
->type
,
854 pace
->flags
, le16_to_cpu(pace
->size
));
855 for (i
= 0; i
< num_subauth
; ++i
) {
856 cFYI(1, "ACE sub_auth[%d]: 0x%x", i
,
857 le32_to_cpu(pace
->sid
.sub_auth
[i
]));
860 /* BB add length check to make sure that we do not have huge
861 num auths and therefore go off the end */
869 static void parse_dacl(struct cifs_acl
*pdacl
, char *end_of_acl
,
870 struct cifs_sid
*pownersid
, struct cifs_sid
*pgrpsid
,
871 struct cifs_fattr
*fattr
)
877 struct cifs_ace
**ppace
;
879 /* BB need to add parm so we can store the SID BB */
882 /* no DACL in the security descriptor, set
883 all the permissions for user/group/other */
884 fattr
->cf_mode
|= S_IRWXUGO
;
888 /* validate that we do not go past end of acl */
889 if (end_of_acl
< (char *)pdacl
+ le16_to_cpu(pdacl
->size
)) {
890 cERROR(1, "ACL too small to parse DACL");
894 cFYI(DBG2
, "DACL revision %d size %d num aces %d",
895 le16_to_cpu(pdacl
->revision
), le16_to_cpu(pdacl
->size
),
896 le32_to_cpu(pdacl
->num_aces
));
898 /* reset rwx permissions for user/group/other.
899 Also, if num_aces is 0 i.e. DACL has no ACEs,
900 user/group/other have no permissions */
901 fattr
->cf_mode
&= ~(S_IRWXUGO
);
903 acl_base
= (char *)pdacl
;
904 acl_size
= sizeof(struct cifs_acl
);
906 num_aces
= le32_to_cpu(pdacl
->num_aces
);
908 umode_t user_mask
= S_IRWXU
;
909 umode_t group_mask
= S_IRWXG
;
910 umode_t other_mask
= S_IRWXU
| S_IRWXG
| S_IRWXO
;
912 ppace
= kmalloc(num_aces
* sizeof(struct cifs_ace
*),
915 cERROR(1, "DACL memory allocation error");
919 for (i
= 0; i
< num_aces
; ++i
) {
920 ppace
[i
] = (struct cifs_ace
*) (acl_base
+ acl_size
);
921 #ifdef CONFIG_CIFS_DEBUG2
922 dump_ace(ppace
[i
], end_of_acl
);
924 if (compare_sids(&(ppace
[i
]->sid
), pownersid
) == 0)
925 access_flags_to_mode(ppace
[i
]->access_req
,
929 if (compare_sids(&(ppace
[i
]->sid
), pgrpsid
) == 0)
930 access_flags_to_mode(ppace
[i
]->access_req
,
934 if (compare_sids(&(ppace
[i
]->sid
), &sid_everyone
) == 0)
935 access_flags_to_mode(ppace
[i
]->access_req
,
939 if (compare_sids(&(ppace
[i
]->sid
), &sid_authusers
) == 0)
940 access_flags_to_mode(ppace
[i
]->access_req
,
946 /* memcpy((void *)(&(cifscred->aces[i])),
948 sizeof(struct cifs_ace)); */
950 acl_base
= (char *)ppace
[i
];
951 acl_size
= le16_to_cpu(ppace
[i
]->size
);
961 static int set_chmod_dacl(struct cifs_acl
*pndacl
, struct cifs_sid
*pownersid
,
962 struct cifs_sid
*pgrpsid
, __u64 nmode
)
965 struct cifs_acl
*pnndacl
;
967 pnndacl
= (struct cifs_acl
*)((char *)pndacl
+ sizeof(struct cifs_acl
));
969 size
+= fill_ace_for_sid((struct cifs_ace
*) ((char *)pnndacl
+ size
),
970 pownersid
, nmode
, S_IRWXU
);
971 size
+= fill_ace_for_sid((struct cifs_ace
*)((char *)pnndacl
+ size
),
972 pgrpsid
, nmode
, S_IRWXG
);
973 size
+= fill_ace_for_sid((struct cifs_ace
*)((char *)pnndacl
+ size
),
974 &sid_everyone
, nmode
, S_IRWXO
);
976 pndacl
->size
= cpu_to_le16(size
+ sizeof(struct cifs_acl
));
977 pndacl
->num_aces
= cpu_to_le32(3);
983 static int parse_sid(struct cifs_sid
*psid
, char *end_of_acl
)
985 /* BB need to add parm so we can store the SID BB */
987 /* validate that we do not go past end of ACL - sid must be at least 8
988 bytes long (assuming no sub-auths - e.g. the null SID */
989 if (end_of_acl
< (char *)psid
+ 8) {
990 cERROR(1, "ACL too small to parse SID %p", psid
);
994 if (psid
->num_subauth
) {
995 #ifdef CONFIG_CIFS_DEBUG2
997 cFYI(1, "SID revision %d num_auth %d",
998 psid
->revision
, psid
->num_subauth
);
1000 for (i
= 0; i
< psid
->num_subauth
; i
++) {
1001 cFYI(1, "SID sub_auth[%d]: 0x%x ", i
,
1002 le32_to_cpu(psid
->sub_auth
[i
]));
1005 /* BB add length check to make sure that we do not have huge
1006 num auths and therefore go off the end */
1008 le32_to_cpu(psid
->sub_auth
[psid
->num_subauth
-1]));
1016 /* Convert CIFS ACL to POSIX form */
1017 static int parse_sec_desc(struct cifs_sb_info
*cifs_sb
,
1018 struct cifs_ntsd
*pntsd
, int acl_len
, struct cifs_fattr
*fattr
)
1021 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
1022 struct cifs_acl
*dacl_ptr
; /* no need for SACL ptr */
1023 char *end_of_acl
= ((char *)pntsd
) + acl_len
;
1029 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1030 le32_to_cpu(pntsd
->osidoffset
));
1031 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1032 le32_to_cpu(pntsd
->gsidoffset
));
1033 dacloffset
= le32_to_cpu(pntsd
->dacloffset
);
1034 dacl_ptr
= (struct cifs_acl
*)((char *)pntsd
+ dacloffset
);
1035 cFYI(DBG2
, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1036 "sacloffset 0x%x dacloffset 0x%x",
1037 pntsd
->revision
, pntsd
->type
, le32_to_cpu(pntsd
->osidoffset
),
1038 le32_to_cpu(pntsd
->gsidoffset
),
1039 le32_to_cpu(pntsd
->sacloffset
), dacloffset
);
1040 /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1041 rc
= parse_sid(owner_sid_ptr
, end_of_acl
);
1043 cFYI(1, "%s: Error %d parsing Owner SID", __func__
, rc
);
1046 rc
= sid_to_id(cifs_sb
, owner_sid_ptr
, fattr
, SIDOWNER
);
1048 cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__
, rc
);
1052 rc
= parse_sid(group_sid_ptr
, end_of_acl
);
1054 cFYI(1, "%s: Error %d mapping Owner SID to gid", __func__
, rc
);
1057 rc
= sid_to_id(cifs_sb
, group_sid_ptr
, fattr
, SIDGROUP
);
1059 cFYI(1, "%s: Error %d mapping Group SID to gid", __func__
, rc
);
1064 parse_dacl(dacl_ptr
, end_of_acl
, owner_sid_ptr
,
1065 group_sid_ptr
, fattr
);
1067 cFYI(1, "no ACL"); /* BB grant all or default perms? */
1072 /* Convert permission bits from mode to equivalent CIFS ACL */
1073 static int build_sec_desc(struct cifs_ntsd
*pntsd
, struct cifs_ntsd
*pnntsd
,
1074 __u32 secdesclen
, __u64 nmode
, uid_t uid
, gid_t gid
, int *aclflag
)
1080 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
1081 struct cifs_sid
*nowner_sid_ptr
, *ngroup_sid_ptr
;
1082 struct cifs_acl
*dacl_ptr
= NULL
; /* no need for SACL ptr */
1083 struct cifs_acl
*ndacl_ptr
= NULL
; /* no need for SACL ptr */
1085 if (nmode
!= NO_CHANGE_64
) { /* chmod */
1086 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1087 le32_to_cpu(pntsd
->osidoffset
));
1088 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1089 le32_to_cpu(pntsd
->gsidoffset
));
1090 dacloffset
= le32_to_cpu(pntsd
->dacloffset
);
1091 dacl_ptr
= (struct cifs_acl
*)((char *)pntsd
+ dacloffset
);
1092 ndacloffset
= sizeof(struct cifs_ntsd
);
1093 ndacl_ptr
= (struct cifs_acl
*)((char *)pnntsd
+ ndacloffset
);
1094 ndacl_ptr
->revision
= dacl_ptr
->revision
;
1095 ndacl_ptr
->size
= 0;
1096 ndacl_ptr
->num_aces
= 0;
1098 rc
= set_chmod_dacl(ndacl_ptr
, owner_sid_ptr
, group_sid_ptr
,
1100 sidsoffset
= ndacloffset
+ le16_to_cpu(ndacl_ptr
->size
);
1101 /* copy sec desc control portion & owner and group sids */
1102 copy_sec_desc(pntsd
, pnntsd
, sidsoffset
);
1103 *aclflag
= CIFS_ACL_DACL
;
1105 memcpy(pnntsd
, pntsd
, secdesclen
);
1106 if (uid
!= NO_CHANGE_32
) { /* chown */
1107 owner_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+
1108 le32_to_cpu(pnntsd
->osidoffset
));
1109 nowner_sid_ptr
= kmalloc(sizeof(struct cifs_sid
),
1111 if (!nowner_sid_ptr
)
1113 rc
= id_to_sid(uid
, SIDOWNER
, nowner_sid_ptr
);
1115 cFYI(1, "%s: Mapping error %d for owner id %d",
1117 kfree(nowner_sid_ptr
);
1120 memcpy(owner_sid_ptr
, nowner_sid_ptr
,
1121 sizeof(struct cifs_sid
));
1122 kfree(nowner_sid_ptr
);
1123 *aclflag
= CIFS_ACL_OWNER
;
1125 if (gid
!= NO_CHANGE_32
) { /* chgrp */
1126 group_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+
1127 le32_to_cpu(pnntsd
->gsidoffset
));
1128 ngroup_sid_ptr
= kmalloc(sizeof(struct cifs_sid
),
1130 if (!ngroup_sid_ptr
)
1132 rc
= id_to_sid(gid
, SIDGROUP
, ngroup_sid_ptr
);
1134 cFYI(1, "%s: Mapping error %d for group id %d",
1136 kfree(ngroup_sid_ptr
);
1139 memcpy(group_sid_ptr
, ngroup_sid_ptr
,
1140 sizeof(struct cifs_sid
));
1141 kfree(ngroup_sid_ptr
);
1142 *aclflag
= CIFS_ACL_GROUP
;
1149 static struct cifs_ntsd
*get_cifs_acl_by_fid(struct cifs_sb_info
*cifs_sb
,
1150 __u16 fid
, u32
*pacllen
)
1152 struct cifs_ntsd
*pntsd
= NULL
;
1154 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1157 return ERR_CAST(tlink
);
1160 rc
= CIFSSMBGetCIFSACL(xid
, tlink_tcon(tlink
), fid
, &pntsd
, pacllen
);
1163 cifs_put_tlink(tlink
);
1165 cFYI(1, "%s: rc = %d ACL len %d", __func__
, rc
, *pacllen
);
1171 static struct cifs_ntsd
*get_cifs_acl_by_path(struct cifs_sb_info
*cifs_sb
,
1172 const char *path
, u32
*pacllen
)
1174 struct cifs_ntsd
*pntsd
= NULL
;
1176 int xid
, rc
, create_options
= 0;
1178 struct cifs_tcon
*tcon
;
1179 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1182 return ERR_CAST(tlink
);
1184 tcon
= tlink_tcon(tlink
);
1187 if (backup_cred(cifs_sb
))
1188 create_options
|= CREATE_OPEN_BACKUP_INTENT
;
1190 rc
= CIFSSMBOpen(xid
, tcon
, path
, FILE_OPEN
, READ_CONTROL
,
1191 create_options
, &fid
, &oplock
, NULL
, cifs_sb
->local_nls
,
1192 cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR
);
1194 rc
= CIFSSMBGetCIFSACL(xid
, tcon
, fid
, &pntsd
, pacllen
);
1195 CIFSSMBClose(xid
, tcon
, fid
);
1198 cifs_put_tlink(tlink
);
1201 cFYI(1, "%s: rc = %d ACL len %d", __func__
, rc
, *pacllen
);
1207 /* Retrieve an ACL from the server */
1208 struct cifs_ntsd
*get_cifs_acl(struct cifs_sb_info
*cifs_sb
,
1209 struct inode
*inode
, const char *path
,
1212 struct cifs_ntsd
*pntsd
= NULL
;
1213 struct cifsFileInfo
*open_file
= NULL
;
1216 open_file
= find_readable_file(CIFS_I(inode
), true);
1218 return get_cifs_acl_by_path(cifs_sb
, path
, pacllen
);
1220 pntsd
= get_cifs_acl_by_fid(cifs_sb
, open_file
->netfid
, pacllen
);
1221 cifsFileInfo_put(open_file
);
1225 /* Set an ACL on the server */
1226 int set_cifs_acl(struct cifs_ntsd
*pnntsd
, __u32 acllen
,
1227 struct inode
*inode
, const char *path
, int aclflag
)
1230 int xid
, rc
, access_flags
, create_options
= 0;
1232 struct cifs_tcon
*tcon
;
1233 struct cifs_sb_info
*cifs_sb
= CIFS_SB(inode
->i_sb
);
1234 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1237 return PTR_ERR(tlink
);
1239 tcon
= tlink_tcon(tlink
);
1242 if (backup_cred(cifs_sb
))
1243 create_options
|= CREATE_OPEN_BACKUP_INTENT
;
1245 if (aclflag
== CIFS_ACL_OWNER
|| aclflag
== CIFS_ACL_GROUP
)
1246 access_flags
= WRITE_OWNER
;
1248 access_flags
= WRITE_DAC
;
1250 rc
= CIFSSMBOpen(xid
, tcon
, path
, FILE_OPEN
, access_flags
,
1251 create_options
, &fid
, &oplock
, NULL
, cifs_sb
->local_nls
,
1252 cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR
);
1254 cERROR(1, "Unable to open file to set ACL");
1258 rc
= CIFSSMBSetCIFSACL(xid
, tcon
, fid
, pnntsd
, acllen
, aclflag
);
1259 cFYI(DBG2
, "SetCIFSACL rc = %d", rc
);
1261 CIFSSMBClose(xid
, tcon
, fid
);
1264 cifs_put_tlink(tlink
);
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
1270 cifs_acl_to_fattr(struct cifs_sb_info
*cifs_sb
, struct cifs_fattr
*fattr
,
1271 struct inode
*inode
, const char *path
, const __u16
*pfid
)
1273 struct cifs_ntsd
*pntsd
= NULL
;
1277 cFYI(DBG2
, "converting ACL to mode for %s", path
);
1280 pntsd
= get_cifs_acl_by_fid(cifs_sb
, *pfid
, &acllen
);
1282 pntsd
= get_cifs_acl(cifs_sb
, inode
, path
, &acllen
);
1284 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1285 if (IS_ERR(pntsd
)) {
1286 rc
= PTR_ERR(pntsd
);
1287 cERROR(1, "%s: error %d getting sec desc", __func__
, rc
);
1289 rc
= parse_sec_desc(cifs_sb
, pntsd
, acllen
, fattr
);
1292 cERROR(1, "parse sec desc failed rc = %d", rc
);
1298 /* Convert mode bits to an ACL so we can update the ACL on the server */
1300 id_mode_to_cifs_acl(struct inode
*inode
, const char *path
, __u64 nmode
,
1301 uid_t uid
, gid_t gid
)
1304 int aclflag
= CIFS_ACL_DACL
; /* default flag to set */
1305 __u32 secdesclen
= 0;
1306 struct cifs_ntsd
*pntsd
= NULL
; /* acl obtained from server */
1307 struct cifs_ntsd
*pnntsd
= NULL
; /* modified acl to be sent to server */
1309 cFYI(DBG2
, "set ACL from mode for %s", path
);
1311 /* Get the security descriptor */
1312 pntsd
= get_cifs_acl(CIFS_SB(inode
->i_sb
), inode
, path
, &secdesclen
);
1314 /* Add three ACEs for owner, group, everyone getting rid of
1315 other ACEs as chmod disables ACEs and set the security descriptor */
1317 if (IS_ERR(pntsd
)) {
1318 rc
= PTR_ERR(pntsd
);
1319 cERROR(1, "%s: error %d getting sec desc", __func__
, rc
);
1321 /* allocate memory for the smb header,
1322 set security descriptor request security descriptor
parameters, and security descriptor itself */
1325 secdesclen
= secdesclen
< DEFSECDESCLEN
?
1326 DEFSECDESCLEN
: secdesclen
;
1327 pnntsd
= kmalloc(secdesclen
, GFP_KERNEL
);
1329 cERROR(1, "Unable to allocate security descriptor");
1334 rc
= build_sec_desc(pntsd
, pnntsd
, secdesclen
, nmode
, uid
, gid
,
1337 cFYI(DBG2
, "build_sec_desc rc: %d", rc
);
1340 /* Set the security descriptor */
1341 rc
= set_cifs_acl(pnntsd
, secdesclen
, inode
,
1343 cFYI(DBG2
, "set_cifs_acl rc: %d", rc
);