/*
 *   fs/cifs/cifsacl.c
 *
 *   Copyright (C) International Business Machines  Corp., 2007,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Contains the routines for mapping CIFS/NTFS ACLs
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "cifs_debug.h"

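/*
 * A Windows SID is a revision byte, a 6-byte identifier authority and up to
 * five 32-bit sub-authorities, conventionally written as a string such as
 * S-1-5-32-544.  The static SIDs below are the well-known values this code
 * needs directly: S-1-1-0 (Everyone), S-1-5-11 (Authenticated Users) and an
 * S-1-5 (NT Authority) template used for user SIDs.
 */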
/* security id for everyone/world system group */
static const struct cifs_sid sid_everyone = {
	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
	1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* group users */
static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };

const struct cred *root_cred;

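/*
 * SID-to-id mappings are cached in two rb-trees (uidtree for owner SIDs,
 * gidtree for group SIDs), each protected by its own spinlock; the trees and
 * locks themselves are defined elsewhere in the cifs module.  Cached nodes
 * carry a timestamp and a refcount so that stale, unreferenced entries can
 * be reclaimed by the shrinker below.
 */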
static void
shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
			int *nr_del)
{
	struct rb_node *node;
	struct rb_node *tmp;
	struct cifs_sid_id *psidid;

	node = rb_first(root);
	while (node) {
		tmp = node;
		node = rb_next(tmp);
		psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
		if (nr_to_scan == 0 || *nr_del == nr_to_scan)
			++(*nr_rem);
		else {
			if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
						&& psidid->refcount == 0) {
				rb_erase(tmp, root);
				++(*nr_del);
			} else
				++(*nr_rem);
		}
	}
}

/*
 * Run idmap cache shrinker.
 */
static int
cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
{
	int nr_to_scan = sc->nr_to_scan;
	int nr_del = 0;
	int nr_rem = 0;
	struct rb_root *root;

	root = &uidtree;
	spin_lock(&siduidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&sidgidlock);

	return nr_rem;
}

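/*
 * With the legacy shrinker interface used here, ->shrink() is called with
 * sc->nr_to_scan == 0 to ask only "how many objects could you free?", and
 * with a non-zero count to actually reclaim; in both cases the return value
 * is the number of cacheable objects remaining.
 */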
static struct shrinker cifs_shrinker = {
	.shrink = cifs_idmap_shrinker,
	.seeks = DEFAULT_SEEKS,
};

static int
cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
{
	char *payload;

	payload = kmalloc(datalen, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	memcpy(payload, data, datalen);
	key->payload.data = payload;
	return 0;
}

static inline void
cifs_idmap_key_destroy(struct key *key)
{
	kfree(key->payload.data);
}

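/*
 * Key type for SID<->id mapping requests.  request_key() on a "cifs.idmap"
 * key triggers a userspace upcall; with cifs-utils this is typically wired
 * up in /etc/request-key.conf with a line along the lines of:
 *
 *	create	cifs.idmap	*	*	/usr/sbin/cifs.idmap %k
 *
 * (helper path and configuration may differ by distribution).
 */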
struct key_type cifs_idmap_key_type = {
	.name        = "cifs.idmap",
	.instantiate = cifs_idmap_key_instantiate,
	.destroy     = cifs_idmap_key_destroy,
	.describe    = user_describe,
	.match       = user_match,
};

static void
sid_to_str(struct cifs_sid *sidptr, char *sidstr)
{
	int i;
	unsigned long saval;
	char *strptr;

	strptr = sidstr;

	sprintf(strptr, "%s", "S");
	strptr = sidstr + strlen(sidstr);

	sprintf(strptr, "-%d", sidptr->revision);
	strptr = sidstr + strlen(sidstr);

	for (i = 0; i < 6; ++i) {
		if (sidptr->authority[i]) {
			sprintf(strptr, "-%d", sidptr->authority[i]);
			strptr = sidstr + strlen(sidstr);
		}
	}

	for (i = 0; i < sidptr->num_subauth; ++i) {
		saval = le32_to_cpu(sidptr->sub_auth[i]);
		sprintf(strptr, "-%ld", saval);
		strptr = sidstr + strlen(sidstr);
	}
}

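/*
 * The rb-trees are ordered by compare_sids() on the full binary SID.  Each
 * cached node also keeps a printable key of the form "os:S-1-5-..." or
 * "gs:S-1-5-..." (built via sid_to_str() above), which is what gets passed
 * to request_key() so the userspace helper knows whether a uid or a gid is
 * being asked for.
 */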
static void
id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
		struct cifs_sid_id **psidid, char *typestr)
{
	int rc;
	char *strptr;
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		parent = node;
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			linkto = &(node->rb_left);
			node = node->rb_left;
		} else if (rc < 0) {
			linkto = &(node->rb_right);
			node = node->rb_right;
		}
	}

	memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sid_to_str(&(*psidid)->sid, strptr);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
}

static struct cifs_sid_id *
id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
{
	int rc;
	struct rb_node *node = root->rb_node;
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			node = node->rb_left;
		} else if (rc < 0) {
			node = node->rb_right;
		} else /* node found */
			return lsidid;
	}

	return NULL;
}

static int
sidid_pending_wait(void *unused)
{
	schedule();
	return signal_pending(current) ? -ERESTARTSYS : 0;
}

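/*
 * Map a SID to a uid or gid for a given mount.  The flow is: look the SID up
 * in the cache (inserting a new node on a miss), then, unless a valid mapping
 * is already cached, perform a request_key() upcall under the root_cred
 * override.  Only one task does the upcall at a time; others wait on the
 * SID_ID_PENDING bit.  If anything fails, the mount's default uid/gid is used.
 */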
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount; /* decremented without spinlock */
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
	}

sid_to_id_out:
	--psidid->refcount; /* decremented without spinlock */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;

	return 0;
}

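/*
 * Module init/exit helpers for the idmap machinery: register the key type,
 * build the override credentials with a private ".cifs_idmap" keyring that
 * request_key() uses as its cache, initialize the rb-trees and register the
 * cache shrinker.
 */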
int
init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
	if (ret < 0)
		goto failed_put_key;

	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	root_cred = cred;

	spin_lock_init(&siduidlock);
	uidtree = RB_ROOT;
	spin_lock_init(&sidgidlock);
	gidtree = RB_ROOT;

	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}

void
exit_cifs_idmap(void)
{
	key_revoke(root_cred->thread_keyring);
	unregister_key_type(&cifs_idmap_key_type);
	put_cred(root_cred);
	unregister_shrinker(&cifs_shrinker);
	cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
}

void
cifs_destroy_idmaptrees(void)
{
	struct rb_root *root;
	struct rb_node *node;

	root = &uidtree;
	spin_lock(&siduidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&sidgidlock);
}

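/*
 * compare_sids() doubles as the ordering function for the rb-trees and as an
 * equality test (callers such as parse_dacl() check for a 0 return).
 */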
/* Compare two SIDs (roughly equivalent to a UUID for a user or group):
   returns 0 if they match, 1 or -1 to order them if they differ,
   and 1 if either pointer is NULL */
int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
{
	int i;
	int num_subauth, num_sat, num_saw;

	if ((!ctsid) || (!cwsid))
		return 1;

	/* compare the revision */
	if (ctsid->revision != cwsid->revision) {
		if (ctsid->revision > cwsid->revision)
			return 1;
		else
			return -1;
	}

	/* compare all of the six auth values */
	for (i = 0; i < 6; ++i) {
		if (ctsid->authority[i] != cwsid->authority[i]) {
			if (ctsid->authority[i] > cwsid->authority[i])
				return 1;
			else
				return -1;
		}
	}

	/* compare all of the subauth values if any */
	num_sat = ctsid->num_subauth;
	num_saw = cwsid->num_subauth;
	num_subauth = num_sat < num_saw ? num_sat : num_saw;
	if (num_subauth) {
		for (i = 0; i < num_subauth; ++i) {
			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
				if (le32_to_cpu(ctsid->sub_auth[i]) >
					le32_to_cpu(cwsid->sub_auth[i]))
					return 1;
				else
					return -1;
			}
		}
	}

	return 0; /* sids compare/match */
}

/* copy ntsd, owner sid, and group sid from a security descriptor to another */
static void copy_sec_desc(const struct cifs_ntsd *pntsd,
				struct cifs_ntsd *pnntsd, __u32 sidsoffset)
{
	int i;

	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;

	/* copy security descriptor control portion */
	pnntsd->revision = pntsd->revision;
	pnntsd->type = pntsd->type;
	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
	pnntsd->sacloffset = 0;
	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));

	/* copy owner sid */
	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);

	nowner_sid_ptr->revision = owner_sid_ptr->revision;
	nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
	for (i = 0; i < 6; i++)
		nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
	for (i = 0; i < 5; i++)
		nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];

	/* copy group sid */
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
					sizeof(struct cifs_sid));

	ngroup_sid_ptr->revision = group_sid_ptr->revision;
	ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
	for (i = 0; i < 6; i++)
		ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
	for (i = 0; i < 5; i++)
		ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];

	return;
}

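/*
 * Example of the deny handling in access_flags_to_mode() below: a DENY ACE
 * carrying GENERIC_WRITE for the owner SID clears S_IWUGO from the owner's
 * bits_to_set mask, so a later ALLOW ACE can no longer turn the owner write
 * bit back on; this is how the deny-first canonical ACE order is honoured
 * when deriving the POSIX mode.
 */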
/*
   Change posix mode to reflect permissions.
   pmode is the existing mode (we only want to overwrite part of this).
   bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
*/
static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
				 umode_t *pbits_to_set)
{
	__u32 flags = le32_to_cpu(ace_flags);
	/* the order of ACEs is important.  The canonical order is to begin with
	   DENY entries followed by ALLOW, otherwise an allow entry could be
	   encountered first, making the subsequent deny entry like "dead code"
	   which would be superfluous since Windows stops when a match is made
	   for the operation you are trying to perform for your user */

	/* For deny ACEs we change the mask so that subsequent allow access
	   control entries do not turn on the bits we are denying */
	if (type == ACCESS_DENIED) {
		if (flags & GENERIC_ALL)
			*pbits_to_set &= ~S_IRWXUGO;

		if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
			*pbits_to_set &= ~S_IWUGO;
		if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
			*pbits_to_set &= ~S_IRUGO;
		if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
			*pbits_to_set &= ~S_IXUGO;
		return;
	} else if (type != ACCESS_ALLOWED) {
		cERROR(1, "unknown access control type %d", type);
		return;
	}
	/* else ACCESS_ALLOWED type */

	if (flags & GENERIC_ALL) {
		*pmode |= (S_IRWXUGO & (*pbits_to_set));
		cFYI(DBG2, "all perms");
		return;
	}
	if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
		*pmode |= (S_IWUGO & (*pbits_to_set));
	if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
		*pmode |= (S_IRUGO & (*pbits_to_set));
	if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
		*pmode |= (S_IXUGO & (*pbits_to_set));

	cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
	return;
}

/*
   Generate access flags to reflect permissions; mode is the existing mode.
   This function is called for every ACE in the DACL whose SID matches
   with either owner or group or everyone.
*/
static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
				__u32 *pace_flags)
{
	/* reset access mask */
	*pace_flags = 0x0;

	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
	mode &= bits_to_use;

	/* check for R/W/X UGO since we do not know whose flags these are,
	   but we have cleared all the bits sans RWX for either user, group
	   or other as per bits_to_use */
	if (mode & S_IRUGO)
		*pace_flags |= SET_FILE_READ_RIGHTS;
	if (mode & S_IWUGO)
		*pace_flags |= SET_FILE_WRITE_RIGHTS;
	if (mode & S_IXUGO)
		*pace_flags |= SET_FILE_EXEC_RIGHTS;

	cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
	return;
}

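/*
 * fill_ace_for_sid() emits one ACCESS_ALLOWED ACE.  The size computed below
 * mirrors the on-the-wire ACE layout: type (1) + flags (1) + size (2) +
 * access mask (4) + SID revision (1) + subauth count (1) + 6 authority
 * bytes + 4 bytes per sub-authority.
 */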
static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
			const struct cifs_sid *psid, __u64 nmode, umode_t bits)
{
	int i;
	__u16 size = 0;
	__u32 access_req = 0;

	pntace->type = ACCESS_ALLOWED;
	pntace->flags = 0x0;
	mode_to_access_flags(nmode, bits, &access_req);
	if (!access_req)
		access_req = SET_MINIMUM_RIGHTS;
	pntace->access_req = cpu_to_le32(access_req);

	pntace->sid.revision = psid->revision;
	pntace->sid.num_subauth = psid->num_subauth;
	for (i = 0; i < 6; i++)
		pntace->sid.authority[i] = psid->authority[i];
	for (i = 0; i < psid->num_subauth; i++)
		pntace->sid.sub_auth[i] = psid->sub_auth[i];

	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
	pntace->size = cpu_to_le16(size);

	return size;
}

#ifdef CONFIG_CIFS_DEBUG2
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
	int num_subauth;

	/* validate that we do not go past end of acl */

	if (le16_to_cpu(pace->size) < 16) {
		cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
		return;
	}

	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cERROR(1, "ACL too small to parse ACE");
		return;
	}

	num_subauth = pace->sid.num_subauth;
	if (num_subauth) {
		int i;
		cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
			pace->sid.revision, pace->sid.num_subauth, pace->type,
			pace->flags, le16_to_cpu(pace->size));
		for (i = 0; i < num_subauth; ++i) {
			cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
				le32_to_cpu(pace->sid.sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
	}

	return;
}
#endif

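/*
 * Walk the DACL: ACEs are packed back to back after the cifs_acl header, so
 * acl_base/acl_size advance by each ACE's self-declared size.  ACEs whose SID
 * matches the owner, group, Everyone or Authenticated Users SID are folded
 * into the user, group and other mode bits respectively.
 */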
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
		       struct cifs_fattr *fattr)
{
	int i;
	int num_aces = 0;
	int acl_size;
	char *acl_base;
	struct cifs_ace **ppace;

	/* BB need to add parm so we can store the SID BB */

	if (!pdacl) {
		/* no DACL in the security descriptor, set
		   all the permissions for user/group/other */
		fattr->cf_mode |= S_IRWXUGO;
		return;
	}

	/* validate that we do not go past end of acl */
	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
		cERROR(1, "ACL too small to parse DACL");
		return;
	}

	cFYI(DBG2, "DACL revision %d size %d num aces %d",
		le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
		le32_to_cpu(pdacl->num_aces));

	/* reset rwx permissions for user/group/other.
	   Also, if num_aces is 0 i.e. DACL has no ACEs,
	   user/group/other have no permissions */
	fattr->cf_mode &= ~(S_IRWXUGO);

	acl_base = (char *)pdacl;
	acl_size = sizeof(struct cifs_acl);

	num_aces = le32_to_cpu(pdacl->num_aces);
	if (num_aces > 0) {
		umode_t user_mask = S_IRWXU;
		umode_t group_mask = S_IRWXG;
		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
				GFP_KERNEL);
		if (!ppace) {
			cERROR(1, "DACL memory allocation error");
			return;
		}

		for (i = 0; i < num_aces; ++i) {
			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
			dump_ace(ppace[i], end_of_acl);
#endif
			if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &user_mask);
			if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &group_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);

/*			memcpy((void *)(&(cifscred->aces[i])),
				(void *)ppace[i],
				sizeof(struct cifs_ace)); */

			acl_base = (char *)ppace[i];
			acl_size = le16_to_cpu(ppace[i]->size);
		}

		kfree(ppace);
	}

	return;
}

static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
			struct cifs_sid *pgrpsid, __u64 nmode)
{
	u16 size = 0;
	struct cifs_acl *pnndacl;

	pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));

	size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
					pownersid, nmode, S_IRWXU);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
					pgrpsid, nmode, S_IRWXG);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
					 &sid_everyone, nmode, S_IRWXO);

	pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
	pndacl->num_aces = cpu_to_le32(3);

	return 0;
}

static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
{
	/* BB need to add parm so we can store the SID BB */

	/* validate that we do not go past end of ACL - sid must be at least 8
	   bytes long (assuming no sub-auths - e.g. the null SID) */
	if (end_of_acl < (char *)psid + 8) {
		cERROR(1, "ACL too small to parse SID %p", psid);
		return -EINVAL;
	}

	if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
		int i;
		cFYI(1, "SID revision %d num_auth %d",
			psid->revision, psid->num_subauth);

		for (i = 0; i < psid->num_subauth; i++) {
			cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
				le32_to_cpu(psid->sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
		cFYI(1, "RID 0x%x",
			le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
#endif
	}

	return 0;
}

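/*
 * A security descriptor stores the owner SID, group SID and DACL at the byte
 * offsets named in its header (osidoffset, gsidoffset, dacloffset), all
 * relative to the start of the descriptor; a dacloffset of 0 means no DACL
 * is present.
 */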
/* Convert CIFS ACL to POSIX form */
static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
		struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
{
	int rc = 0;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
	char *end_of_acl = ((char *)pntsd) + acl_len;
	__u32 dacloffset;

	if (pntsd == NULL)
		return -EIO;

	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	dacloffset = le32_to_cpu(pntsd->dacloffset);
	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
	cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
		 "sacloffset 0x%x dacloffset 0x%x",
		 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
		 le32_to_cpu(pntsd->gsidoffset),
		 le32_to_cpu(pntsd->sacloffset), dacloffset);
/*	cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
	rc = parse_sid(owner_sid_ptr, end_of_acl);
	if (rc) {
		cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
	if (rc) {
		cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
		return rc;
	}

	rc = parse_sid(group_sid_ptr, end_of_acl);
	if (rc) {
		cFYI(1, "%s: Error %d parsing Group SID", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
	if (rc) {
		cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
		return rc;
	}

	if (dacloffset)
		parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
			   group_sid_ptr, fattr);
	else
		cFYI(1, "no ACL"); /* BB grant all or default perms? */

/*	cifscred->uid = owner_sid_ptr->rid;
	cifscred->gid = group_sid_ptr->rid;
	memcpy((void *)(&(cifscred->osid)), (void *)owner_sid_ptr,
			sizeof(struct cifs_sid));
	memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
			sizeof(struct cifs_sid)); */

	return rc;
}

/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
				struct inode *inode, __u64 nmode)
{
	int rc = 0;
	__u32 dacloffset;
	__u32 ndacloffset;
	__u32 sidsoffset;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */

	if ((inode == NULL) || (pntsd == NULL) || (pnntsd == NULL))
		return -EIO;

	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));

	dacloffset = le32_to_cpu(pntsd->dacloffset);
	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);

	ndacloffset = sizeof(struct cifs_ntsd);
	ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
	ndacl_ptr->revision = dacl_ptr->revision;
	ndacl_ptr->size = 0;
	ndacl_ptr->num_aces = 0;

	rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr, nmode);

	sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);

	/* copy security descriptor control portion and owner and group sid */
	copy_sec_desc(pntsd, pnntsd, sidsoffset);

	return rc;
}

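/*
 * Note on the layout produced by build_sec_desc(): the new descriptor is
 * [cifs_ntsd header][new DACL with the three ACEs][owner SID][group SID];
 * sidsoffset is derived from the DACL size so copy_sec_desc() can patch the
 * header offsets to match.
 */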
static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
		__u16 fid, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int xid, rc;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	xid = GetXid();
	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
	FreeXid(xid);

	cifs_put_tlink(tlink);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}

static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int oplock = 0;
	int xid, rc;
	__u16 fid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
			 &fid, &oplock, NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (!rc) {
		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
		CIFSSMBClose(xid, tcon, fid);
	}

	cifs_put_tlink(tlink);
	FreeXid(xid);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}

/* Retrieve an ACL from the server */
struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
				      struct inode *inode, const char *path,
				      u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	struct cifsFileInfo *open_file = NULL;

	if (inode)
		open_file = find_readable_file(CIFS_I(inode), true);
	if (!open_file)
		return get_cifs_acl_by_path(cifs_sb, path, pacllen);

	pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
	cifsFileInfo_put(open_file);
	return pntsd;
}

static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
		struct cifs_ntsd *pnntsd, u32 acllen)
{
	int xid, rc;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	xid = GetXid();
	rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen);
	FreeXid(xid);
	cifs_put_tlink(tlink);

	cFYI(DBG2, "SetCIFSACL rc = %d", rc);
	return rc;
}

static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
		struct cifs_ntsd *pnntsd, u32 acllen)
{
	int oplock = 0;
	int xid, rc;
	__u16 fid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0,
			 &fid, &oplock, NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		cERROR(1, "Unable to open file to set ACL");
		goto out;
	}

	rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen);
	cFYI(DBG2, "SetCIFSACL rc = %d", rc);

	CIFSSMBClose(xid, tcon, fid);
out:
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Set an ACL on the server */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
			struct inode *inode, const char *path)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *open_file;
	int rc;

	cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);

	open_file = find_readable_file(CIFS_I(inode), true);
	if (!open_file)
		return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);

	rc = set_cifs_acl_by_fid(cifs_sb, open_file->netfid, pnntsd, acllen);
	cifsFileInfo_put(open_file);
	return rc;
}

/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
		  struct inode *inode, const char *path, const __u16 *pfid)
{
	struct cifs_ntsd *pntsd = NULL;
	u32 acllen = 0;
	int rc = 0;

	cFYI(DBG2, "converting ACL to mode for %s", path);

	if (pfid)
		pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
	else
		pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);

	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
		kfree(pntsd);
		if (rc)
			cERROR(1, "parse sec desc failed rc = %d", rc);
	}

	return rc;
}

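/*
 * chmod path: fetch the current descriptor from the server, build a fresh one
 * that carries only the three owner/group/everyone ACEs derived from the new
 * mode, and write it back (the by-path variant opens the file with WRITE_DAC).
 */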
/* Convert mode bits to an ACL so we can update the ACL on the server */
int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
{
	int rc = 0;
	__u32 secdesclen = 0;
	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */

	cFYI(DBG2, "set ACL from mode for %s", path);

	/* Get the security descriptor */
	pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);

	/* Add three ACEs for owner, group, everyone getting rid of
	   other ACEs as chmod disables ACEs and set the security descriptor */

	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		/* allocate memory for the smb header,
		   set security descriptor request security descriptor
		   parameters, and security descriptor itself */

		secdesclen = secdesclen < DEFSECDESCLEN ?
					DEFSECDESCLEN : secdesclen;
		pnntsd = kmalloc(secdesclen, GFP_KERNEL);
		if (!pnntsd) {
			cERROR(1, "Unable to allocate security descriptor");
			kfree(pntsd);
			return -ENOMEM;
		}

		rc = build_sec_desc(pntsd, pnntsd, inode, nmode);

		cFYI(DBG2, "build_sec_desc rc: %d", rc);

		if (!rc) {
			/* Set the security descriptor */
			rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
			cFYI(DBG2, "set_cifs_acl rc: %d", rc);
		}

		kfree(pnntsd);
		kfree(pntsd);
	}

	return rc;
}