IPoIB: Fix send lockup due to missed TX completion
[linux/fpc-iii.git] / fs / cifs / cifsacl.c
blobb3522af3f9964ae4e3b8801139a828f7e833ef72
1 /*
2 * fs/cifs/cifsacl.c
4 * Copyright (C) International Business Machines Corp., 2007,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Contains the routines for mapping CIFS/NTFS ACLs
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/fs.h>
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/keyctl.h>
28 #include <linux/key-type.h>
29 #include <keys/user-type.h>
30 #include "cifspdu.h"
31 #include "cifsglob.h"
32 #include "cifsacl.h"
33 #include "cifsproto.h"
34 #include "cifs_debug.h"
36 /* security id for everyone/world system group */
37 static const struct cifs_sid sid_everyone = {
38 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
39 /* security id for Authenticated Users system group */
40 static const struct cifs_sid sid_authusers = {
41 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
42 /* group users */
43 static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
45 const struct cred *root_cred;
47 static void
48 shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
49 int *nr_del)
51 struct rb_node *node;
52 struct rb_node *tmp;
53 struct cifs_sid_id *psidid;
55 node = rb_first(root);
56 while (node) {
57 tmp = node;
58 node = rb_next(tmp);
59 psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
60 if (nr_to_scan == 0 || *nr_del == nr_to_scan)
61 ++(*nr_rem);
62 else {
63 if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
64 && psidid->refcount == 0) {
65 rb_erase(tmp, root);
66 ++(*nr_del);
67 } else
68 ++(*nr_rem);
74 * Run idmap cache shrinker.
76 static int
77 cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
79 int nr_to_scan = sc->nr_to_scan;
80 int nr_del = 0;
81 int nr_rem = 0;
82 struct rb_root *root;
84 root = &uidtree;
85 spin_lock(&siduidlock);
86 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
87 spin_unlock(&siduidlock);
89 root = &gidtree;
90 spin_lock(&sidgidlock);
91 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
92 spin_unlock(&sidgidlock);
94 root = &siduidtree;
95 spin_lock(&uidsidlock);
96 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
97 spin_unlock(&uidsidlock);
99 root = &sidgidtree;
100 spin_lock(&gidsidlock);
101 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
102 spin_unlock(&gidsidlock);
104 return nr_rem;
107 static void
108 sid_rb_insert(struct rb_root *root, unsigned long cid,
109 struct cifs_sid_id **psidid, char *typestr)
111 char *strptr;
112 struct rb_node *node = root->rb_node;
113 struct rb_node *parent = NULL;
114 struct rb_node **linkto = &(root->rb_node);
115 struct cifs_sid_id *lsidid;
117 while (node) {
118 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
119 parent = node;
120 if (cid > lsidid->id) {
121 linkto = &(node->rb_left);
122 node = node->rb_left;
124 if (cid < lsidid->id) {
125 linkto = &(node->rb_right);
126 node = node->rb_right;
130 (*psidid)->id = cid;
131 (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
132 (*psidid)->refcount = 0;
134 sprintf((*psidid)->sidstr, "%s", typestr);
135 strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
136 sprintf(strptr, "%ld", cid);
138 clear_bit(SID_ID_PENDING, &(*psidid)->state);
139 clear_bit(SID_ID_MAPPED, &(*psidid)->state);
141 rb_link_node(&(*psidid)->rbnode, parent, linkto);
142 rb_insert_color(&(*psidid)->rbnode, root);
145 static struct cifs_sid_id *
146 sid_rb_search(struct rb_root *root, unsigned long cid)
148 struct rb_node *node = root->rb_node;
149 struct cifs_sid_id *lsidid;
151 while (node) {
152 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
153 if (cid > lsidid->id)
154 node = node->rb_left;
155 else if (cid < lsidid->id)
156 node = node->rb_right;
157 else /* node found */
158 return lsidid;
161 return NULL;
164 static struct shrinker cifs_shrinker = {
165 .shrink = cifs_idmap_shrinker,
166 .seeks = DEFAULT_SEEKS,
169 static int
170 cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
172 char *payload;
174 payload = kmalloc(datalen, GFP_KERNEL);
175 if (!payload)
176 return -ENOMEM;
178 memcpy(payload, data, datalen);
179 key->payload.data = payload;
180 key->datalen = datalen;
181 return 0;
184 static inline void
185 cifs_idmap_key_destroy(struct key *key)
187 kfree(key->payload.data);
190 struct key_type cifs_idmap_key_type = {
191 .name = "cifs.idmap",
192 .instantiate = cifs_idmap_key_instantiate,
193 .destroy = cifs_idmap_key_destroy,
194 .describe = user_describe,
195 .match = user_match,
198 static void
199 sid_to_str(struct cifs_sid *sidptr, char *sidstr)
201 int i;
202 unsigned long saval;
203 char *strptr;
205 strptr = sidstr;
207 sprintf(strptr, "%s", "S");
208 strptr = sidstr + strlen(sidstr);
210 sprintf(strptr, "-%d", sidptr->revision);
211 strptr = sidstr + strlen(sidstr);
213 for (i = 0; i < 6; ++i) {
214 if (sidptr->authority[i]) {
215 sprintf(strptr, "-%d", sidptr->authority[i]);
216 strptr = sidstr + strlen(sidstr);
220 for (i = 0; i < sidptr->num_subauth; ++i) {
221 saval = le32_to_cpu(sidptr->sub_auth[i]);
222 sprintf(strptr, "-%ld", saval);
223 strptr = sidstr + strlen(sidstr);
227 static void
228 cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
230 memcpy(dst, src, sizeof(*dst));
231 dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS);
234 static void
235 id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
236 struct cifs_sid_id **psidid, char *typestr)
238 int rc;
239 char *strptr;
240 struct rb_node *node = root->rb_node;
241 struct rb_node *parent = NULL;
242 struct rb_node **linkto = &(root->rb_node);
243 struct cifs_sid_id *lsidid;
245 while (node) {
246 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
247 parent = node;
248 rc = compare_sids(sidptr, &((lsidid)->sid));
249 if (rc > 0) {
250 linkto = &(node->rb_left);
251 node = node->rb_left;
252 } else if (rc < 0) {
253 linkto = &(node->rb_right);
254 node = node->rb_right;
258 cifs_copy_sid(&(*psidid)->sid, sidptr);
259 (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
260 (*psidid)->refcount = 0;
262 sprintf((*psidid)->sidstr, "%s", typestr);
263 strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
264 sid_to_str(&(*psidid)->sid, strptr);
266 clear_bit(SID_ID_PENDING, &(*psidid)->state);
267 clear_bit(SID_ID_MAPPED, &(*psidid)->state);
269 rb_link_node(&(*psidid)->rbnode, parent, linkto);
270 rb_insert_color(&(*psidid)->rbnode, root);
273 static struct cifs_sid_id *
274 id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
276 int rc;
277 struct rb_node *node = root->rb_node;
278 struct cifs_sid_id *lsidid;
280 while (node) {
281 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
282 rc = compare_sids(sidptr, &((lsidid)->sid));
283 if (rc > 0) {
284 node = node->rb_left;
285 } else if (rc < 0) {
286 node = node->rb_right;
287 } else /* node found */
288 return lsidid;
291 return NULL;
294 static int
295 sidid_pending_wait(void *unused)
297 schedule();
298 return signal_pending(current) ? -ERESTARTSYS : 0;
/*
 * id_to_sid - map a POSIX uid/gid to a Windows SID via the cached
 * rb-tree, upcalling to the cifs.idmap key helper on a cache miss.
 *
 * @cid:     the uid (SIDOWNER) or gid (SIDGROUP) to map
 * @sidtype: SIDOWNER or SIDGROUP; selects the cache tree and its lock
 * @ssid:    out parameter; receives the SID on success
 *
 * Takes a refcount on the cache node under the tree spinlock so the
 * shrinker cannot erase it while in use.  Concurrent mappers of the
 * same id serialize on the SID_ID_PENDING bit: the first caller does
 * the request_key() upcall (under root_cred), the rest wait_on_bit().
 * Returns 0 on success; -EINVAL, -EIO, -ENOMEM or -ERESTARTSYS on
 * failure.
 */
301 static int
302 id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
304 int rc = 0;
305 struct key *sidkey;
306 const struct cred *saved_cred;
307 struct cifs_sid *lsid;
308 struct cifs_sid_id *psidid, *npsidid;
309 struct rb_root *cidtree;
310 spinlock_t *cidlock;
/* pick the per-type tree + lock */
312 if (sidtype == SIDOWNER) {
313 cidlock = &siduidlock;
314 cidtree = &uidtree;
315 } else if (sidtype == SIDGROUP) {
316 cidlock = &sidgidlock;
317 cidtree = &gidtree;
318 } else
319 return -EINVAL;
321 spin_lock(cidlock);
322 psidid = sid_rb_search(cidtree, cid);
324 if (!psidid) { /* node does not exist, allocate one & attempt adding */
/* allocate outside the lock, then re-check for a racing inserter */
325 spin_unlock(cidlock);
326 npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
327 if (!npsidid)
328 return -ENOMEM;
330 npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
331 if (!npsidid->sidstr) {
332 kfree(npsidid);
333 return -ENOMEM;
336 spin_lock(cidlock);
337 psidid = sid_rb_search(cidtree, cid);
338 if (psidid) { /* node happened to get inserted meanwhile */
339 ++psidid->refcount;
340 spin_unlock(cidlock);
341 kfree(npsidid->sidstr);
342 kfree(npsidid);
343 } else {
344 psidid = npsidid;
345 sid_rb_insert(cidtree, cid, &psidid,
346 sidtype == SIDOWNER ? "oi:" : "gi:");
347 ++psidid->refcount;
348 spin_unlock(cidlock);
350 } else {
351 ++psidid->refcount;
352 spin_unlock(cidlock);
356 * If we are here, it is safe to access psidid and its fields
357 * since a reference was taken earlier while holding the spinlock.
358 * A reference on the node is put without holding the spinlock
359 * and it is OK to do so in this case, shrinker will not erase
360 * this node until all references are put and we do not access
361 * any fields of the node after a reference is put .
/* fast path: mapping already cached */
363 if (test_bit(SID_ID_MAPPED, &psidid->state)) {
364 cifs_copy_sid(ssid, &psidid->sid);
365 psidid->time = jiffies; /* update ts for accessing */
366 goto id_sid_out;
/* recent upcall failure: don't hammer the helper before SID_MAP_RETRY */
369 if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
370 rc = -EINVAL;
371 goto id_sid_out;
/* winner of the PENDING bit performs the upcall; losers wait below */
374 if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
375 saved_cred = override_creds(root_cred);
376 sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
377 if (IS_ERR(sidkey)) {
378 rc = -EINVAL;
379 cFYI(1, "%s: Can't map and id to a SID", __func__);
380 } else if (sidkey->datalen < sizeof(struct cifs_sid)) {
381 rc = -EIO;
382 cFYI(1, "%s: Downcall contained malformed key "
383 "(datalen=%hu)", __func__, sidkey->datalen);
384 } else {
385 lsid = (struct cifs_sid *)sidkey->payload.data;
386 cifs_copy_sid(&psidid->sid, lsid);
387 cifs_copy_sid(ssid, &psidid->sid);
388 set_bit(SID_ID_MAPPED, &psidid->state);
389 key_put(sidkey);
390 kfree(psidid->sidstr);
392 psidid->time = jiffies; /* update ts for accessing */
393 revert_creds(saved_cred);
394 clear_bit(SID_ID_PENDING, &psidid->state);
395 wake_up_bit(&psidid->state, SID_ID_PENDING);
396 } else {
397 rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
398 sidid_pending_wait, TASK_INTERRUPTIBLE);
399 if (rc) {
400 cFYI(1, "%s: sidid_pending_wait interrupted %d",
401 __func__, rc);
402 --psidid->refcount;
403 return rc;
405 if (test_bit(SID_ID_MAPPED, &psidid->state))
406 cifs_copy_sid(ssid, &psidid->sid);
407 else
408 rc = -EINVAL;
410 id_sid_out:
411 --psidid->refcount;
412 return rc;
/*
 * sid_to_id - map a Windows SID to a POSIX uid/gid and store it into
 * @fattr (cf_uid for SIDOWNER, cf_gid for SIDGROUP).
 *
 * Mirrors id_to_sid(): cached rb-tree lookup first, then a
 * request_key() upcall (under root_cred) serialized on the
 * SID_ID_PENDING bit.  Unlike id_to_sid(), an upcall failure is not
 * fatal: @cid stays at the mount's default uid/gid and 0 is returned.
 * Returns -ENOENT for a bad @sidtype, -ENOMEM on allocation failure,
 * or the wait_on_bit() error if interrupted.
 */
415 static int
416 sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
417 struct cifs_fattr *fattr, uint sidtype)
419 int rc;
420 unsigned long cid;
421 struct key *idkey;
422 const struct cred *saved_cred;
423 struct cifs_sid_id *psidid, *npsidid;
424 struct rb_root *cidtree;
425 spinlock_t *cidlock;
/* pick the per-type tree + lock, and the fallback id */
427 if (sidtype == SIDOWNER) {
428 cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
429 cidlock = &siduidlock;
430 cidtree = &uidtree;
431 } else if (sidtype == SIDGROUP) {
432 cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
433 cidlock = &sidgidlock;
434 cidtree = &gidtree;
435 } else
436 return -ENOENT;
438 spin_lock(cidlock);
439 psidid = id_rb_search(cidtree, psid);
441 if (!psidid) { /* node does not exist, allocate one & attempt adding */
/* allocate outside the lock, then re-check for a racing inserter */
442 spin_unlock(cidlock);
443 npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
444 if (!npsidid)
445 return -ENOMEM;
447 npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
448 if (!npsidid->sidstr) {
449 kfree(npsidid);
450 return -ENOMEM;
453 spin_lock(cidlock);
454 psidid = id_rb_search(cidtree, psid);
455 if (psidid) { /* node happened to get inserted meanwhile */
456 ++psidid->refcount;
457 spin_unlock(cidlock);
458 kfree(npsidid->sidstr);
459 kfree(npsidid);
460 } else {
461 psidid = npsidid;
462 id_rb_insert(cidtree, psid, &psidid,
463 sidtype == SIDOWNER ? "os:" : "gs:");
464 ++psidid->refcount;
465 spin_unlock(cidlock);
467 } else {
468 ++psidid->refcount;
469 spin_unlock(cidlock);
473 * If we are here, it is safe to access psidid and its fields
474 * since a reference was taken earlier while holding the spinlock.
475 * A reference on the node is put without holding the spinlock
476 * and it is OK to do so in this case, shrinker will not erase
477 * this node until all references are put and we do not access
478 * any fields of the node after a reference is put .
/* fast path: mapping already cached */
480 if (test_bit(SID_ID_MAPPED, &psidid->state)) {
481 cid = psidid->id;
482 psidid->time = jiffies; /* update ts for accessing */
483 goto sid_to_id_out;
/* recent upcall failure: keep the default id until SID_MAP_RETRY expires */
486 if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
487 goto sid_to_id_out;
/* winner of the PENDING bit performs the upcall; losers wait below */
489 if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
490 saved_cred = override_creds(root_cred);
491 idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
492 if (IS_ERR(idkey))
493 cFYI(1, "%s: Can't map SID to an id", __func__);
494 else {
495 cid = *(unsigned long *)idkey->payload.value;
496 psidid->id = cid;
497 set_bit(SID_ID_MAPPED, &psidid->state);
498 key_put(idkey);
499 kfree(psidid->sidstr);
501 revert_creds(saved_cred);
502 psidid->time = jiffies; /* update ts for accessing */
503 clear_bit(SID_ID_PENDING, &psidid->state);
504 wake_up_bit(&psidid->state, SID_ID_PENDING);
505 } else {
506 rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
507 sidid_pending_wait, TASK_INTERRUPTIBLE);
508 if (rc) {
509 cFYI(1, "%s: sidid_pending_wait interrupted %d",
510 __func__, rc);
511 --psidid->refcount; /* decremented without spinlock */
512 return rc;
514 if (test_bit(SID_ID_MAPPED, &psidid->state))
515 cid = psidid->id;
518 sid_to_id_out:
519 --psidid->refcount; /* decremented without spinlock */
520 if (sidtype == SIDOWNER)
521 fattr->cf_uid = cid;
522 else
523 fattr->cf_gid = cid;
525 return 0;
/*
 * init_cifs_idmap - register the cifs.idmap key type and set up the
 * private credentials/keyring used for idmap upcalls, then initialize
 * the four mapping trees and register the cache shrinker.
 * Returns 0 on success or a negative errno, undoing partial setup via
 * the goto cleanup labels.
 */
529 init_cifs_idmap(void)
531 struct cred *cred;
532 struct key *keyring;
533 int ret;
535 cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);
537 /* create an override credential set with a special thread keyring in
538 * which requests are cached
540 * this is used to prevent malicious redirections from being installed
541 * with add_key().
543 cred = prepare_kernel_cred(NULL);
544 if (!cred)
545 return -ENOMEM;
547 keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
548 (KEY_POS_ALL & ~KEY_POS_SETATTR) |
549 KEY_USR_VIEW | KEY_USR_READ,
550 KEY_ALLOC_NOT_IN_QUOTA);
551 if (IS_ERR(keyring)) {
552 ret = PTR_ERR(keyring);
553 goto failed_put_cred;
556 ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
557 if (ret < 0)
558 goto failed_put_key;
560 ret = register_key_type(&cifs_idmap_key_type);
561 if (ret < 0)
562 goto failed_put_key;
564 /* instruct request_key() to use this special keyring as a cache for
565 * the results it looks up */
566 cred->thread_keyring = keyring;
567 cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
568 root_cred = cred;
/* the four id<->sid mapping caches and their locks */
570 spin_lock_init(&siduidlock);
571 uidtree = RB_ROOT;
572 spin_lock_init(&sidgidlock);
573 gidtree = RB_ROOT;
575 spin_lock_init(&uidsidlock);
576 siduidtree = RB_ROOT;
577 spin_lock_init(&gidsidlock);
578 sidgidtree = RB_ROOT;
579 register_shrinker(&cifs_shrinker);
581 cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
582 return 0;
584 failed_put_key:
585 key_put(keyring);
586 failed_put_cred:
587 put_cred(cred);
588 return ret;
591 void
592 exit_cifs_idmap(void)
594 key_revoke(root_cred->thread_keyring);
595 unregister_key_type(&cifs_idmap_key_type);
596 put_cred(root_cred);
597 unregister_shrinker(&cifs_shrinker);
598 cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
601 void
602 cifs_destroy_idmaptrees(void)
604 struct rb_root *root;
605 struct rb_node *node;
607 root = &uidtree;
608 spin_lock(&siduidlock);
609 while ((node = rb_first(root)))
610 rb_erase(node, root);
611 spin_unlock(&siduidlock);
613 root = &gidtree;
614 spin_lock(&sidgidlock);
615 while ((node = rb_first(root)))
616 rb_erase(node, root);
617 spin_unlock(&sidgidlock);
619 root = &siduidtree;
620 spin_lock(&uidsidlock);
621 while ((node = rb_first(root)))
622 rb_erase(node, root);
623 spin_unlock(&uidsidlock);
625 root = &sidgidtree;
626 spin_lock(&gidsidlock);
627 while ((node = rb_first(root)))
628 rb_erase(node, root);
629 spin_unlock(&gidsidlock);
632 /* if the two SIDs (roughly equivalent to a UUID for a user or group) are
633 the same returns 1, if they do not match returns 0 */
634 int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
636 int i;
637 int num_subauth, num_sat, num_saw;
639 if ((!ctsid) || (!cwsid))
640 return 1;
642 /* compare the revision */
643 if (ctsid->revision != cwsid->revision) {
644 if (ctsid->revision > cwsid->revision)
645 return 1;
646 else
647 return -1;
650 /* compare all of the six auth values */
651 for (i = 0; i < 6; ++i) {
652 if (ctsid->authority[i] != cwsid->authority[i]) {
653 if (ctsid->authority[i] > cwsid->authority[i])
654 return 1;
655 else
656 return -1;
660 /* compare all of the subauth values if any */
661 num_sat = ctsid->num_subauth;
662 num_saw = cwsid->num_subauth;
663 num_subauth = num_sat < num_saw ? num_sat : num_saw;
664 if (num_subauth) {
665 for (i = 0; i < num_subauth; ++i) {
666 if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
667 if (le32_to_cpu(ctsid->sub_auth[i]) >
668 le32_to_cpu(cwsid->sub_auth[i]))
669 return 1;
670 else
671 return -1;
676 return 0; /* sids compare/match */
680 /* copy ntsd, owner sid, and group sid from a security descriptor to another */
681 static void copy_sec_desc(const struct cifs_ntsd *pntsd,
682 struct cifs_ntsd *pnntsd, __u32 sidsoffset)
684 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
685 struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
687 /* copy security descriptor control portion */
688 pnntsd->revision = pntsd->revision;
689 pnntsd->type = pntsd->type;
690 pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
691 pnntsd->sacloffset = 0;
692 pnntsd->osidoffset = cpu_to_le32(sidsoffset);
693 pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
695 /* copy owner sid */
696 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
697 le32_to_cpu(pntsd->osidoffset));
698 nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
699 cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
701 /* copy group sid */
702 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
703 le32_to_cpu(pntsd->gsidoffset));
704 ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
705 sizeof(struct cifs_sid));
706 cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
708 return;
713 change posix mode to reflect permissions
714 pmode is the existing mode (we only want to overwrite part of this
715 bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
717 static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
718 umode_t *pbits_to_set)
720 __u32 flags = le32_to_cpu(ace_flags);
721 /* the order of ACEs is important. The canonical order is to begin with
722 DENY entries followed by ALLOW, otherwise an allow entry could be
723 encountered first, making the subsequent deny entry like "dead code"
724 which would be superflous since Windows stops when a match is made
725 for the operation you are trying to perform for your user */
727 /* For deny ACEs we change the mask so that subsequent allow access
728 control entries do not turn on the bits we are denying */
729 if (type == ACCESS_DENIED) {
730 if (flags & GENERIC_ALL)
731 *pbits_to_set &= ~S_IRWXUGO;
733 if ((flags & GENERIC_WRITE) ||
734 ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
735 *pbits_to_set &= ~S_IWUGO;
736 if ((flags & GENERIC_READ) ||
737 ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
738 *pbits_to_set &= ~S_IRUGO;
739 if ((flags & GENERIC_EXECUTE) ||
740 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
741 *pbits_to_set &= ~S_IXUGO;
742 return;
743 } else if (type != ACCESS_ALLOWED) {
744 cERROR(1, "unknown access control type %d", type);
745 return;
747 /* else ACCESS_ALLOWED type */
749 if (flags & GENERIC_ALL) {
750 *pmode |= (S_IRWXUGO & (*pbits_to_set));
751 cFYI(DBG2, "all perms");
752 return;
754 if ((flags & GENERIC_WRITE) ||
755 ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
756 *pmode |= (S_IWUGO & (*pbits_to_set));
757 if ((flags & GENERIC_READ) ||
758 ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
759 *pmode |= (S_IRUGO & (*pbits_to_set));
760 if ((flags & GENERIC_EXECUTE) ||
761 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
762 *pmode |= (S_IXUGO & (*pbits_to_set));
764 cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
765 return;
769 Generate access flags to reflect permissions mode is the existing mode.
770 This function is called for every ACE in the DACL whose SID matches
771 with either owner or group or everyone.
774 static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
775 __u32 *pace_flags)
777 /* reset access mask */
778 *pace_flags = 0x0;
780 /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
781 mode &= bits_to_use;
783 /* check for R/W/X UGO since we do not know whose flags
784 is this but we have cleared all the bits sans RWX for
785 either user or group or other as per bits_to_use */
786 if (mode & S_IRUGO)
787 *pace_flags |= SET_FILE_READ_RIGHTS;
788 if (mode & S_IWUGO)
789 *pace_flags |= SET_FILE_WRITE_RIGHTS;
790 if (mode & S_IXUGO)
791 *pace_flags |= SET_FILE_EXEC_RIGHTS;
793 cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
794 return;
797 static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
798 const struct cifs_sid *psid, __u64 nmode, umode_t bits)
800 int i;
801 __u16 size = 0;
802 __u32 access_req = 0;
804 pntace->type = ACCESS_ALLOWED;
805 pntace->flags = 0x0;
806 mode_to_access_flags(nmode, bits, &access_req);
807 if (!access_req)
808 access_req = SET_MINIMUM_RIGHTS;
809 pntace->access_req = cpu_to_le32(access_req);
811 pntace->sid.revision = psid->revision;
812 pntace->sid.num_subauth = psid->num_subauth;
813 for (i = 0; i < 6; i++)
814 pntace->sid.authority[i] = psid->authority[i];
815 for (i = 0; i < psid->num_subauth; i++)
816 pntace->sid.sub_auth[i] = psid->sub_auth[i];
818 size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
819 pntace->size = cpu_to_le16(size);
821 return size;
#ifdef CONFIG_CIFS_DEBUG2
/*
 * Debug helper: log one ACE, after sanity-checking that its declared
 * size is plausible and fits inside the ACL buffer.
 */
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
	int num_subauth;

	/* validate that we do not go past end of acl */
	if (le16_to_cpu(pace->size) < 16) {
		cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
		return;
	}
	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cERROR(1, "ACL too small to parse ACE");
		return;
	}

	num_subauth = pace->sid.num_subauth;
	if (num_subauth) {
		int i;

		cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
			pace->sid.revision, pace->sid.num_subauth, pace->type,
			pace->flags, le16_to_cpu(pace->size));
		for (i = 0; i < num_subauth; ++i) {
			cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
				le32_to_cpu(pace->sid.sub_auth[i]));
		}
		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
	}
}
#endif
/*
 * parse_dacl - fold a DACL's ACEs into POSIX mode bits in @fattr.
 *
 * A missing DACL grants everything (S_IRWXUGO); an empty one grants
 * nothing.  Each ACE whose SID matches the owner, group, everyone or
 * authenticated-users SID adjusts fattr->cf_mode via
 * access_flags_to_mode(), with per-category masks so DENY ACEs can
 * block later ALLOW ACEs.  Errors are logged and the function returns
 * void, leaving cf_mode as computed so far.
 */
862 static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
863 struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
864 struct cifs_fattr *fattr)
866 int i;
867 int num_aces = 0;
868 int acl_size;
869 char *acl_base;
870 struct cifs_ace **ppace;
872 /* BB need to add parm so we can store the SID BB */
874 if (!pdacl) {
875 /* no DACL in the security descriptor, set
876 all the permissions for user/group/other */
877 fattr->cf_mode |= S_IRWXUGO;
878 return;
881 /* validate that we do not go past end of acl */
882 if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
883 cERROR(1, "ACL too small to parse DACL");
884 return;
887 cFYI(DBG2, "DACL revision %d size %d num aces %d",
888 le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
889 le32_to_cpu(pdacl->num_aces));
891 /* reset rwx permissions for user/group/other.
892 Also, if num_aces is 0 i.e. DACL has no ACEs,
893 user/group/other have no permissions */
894 fattr->cf_mode &= ~(S_IRWXUGO);
/* first ACE starts right after the cifs_acl header */
896 acl_base = (char *)pdacl;
897 acl_size = sizeof(struct cifs_acl);
899 num_aces = le32_to_cpu(pdacl->num_aces);
900 if (num_aces > 0) {
/* masks narrow as DENY ACEs are seen (see access_flags_to_mode) */
901 umode_t user_mask = S_IRWXU;
902 umode_t group_mask = S_IRWXG;
903 umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
905 ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
906 GFP_KERNEL);
907 if (!ppace) {
908 cERROR(1, "DACL memory allocation error");
909 return;
912 for (i = 0; i < num_aces; ++i) {
913 ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
914 #ifdef CONFIG_CIFS_DEBUG2
915 dump_ace(ppace[i], end_of_acl);
916 #endif
/* compare_sids() returns 0 on match */
917 if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
918 access_flags_to_mode(ppace[i]->access_req,
919 ppace[i]->type,
920 &fattr->cf_mode,
921 &user_mask);
922 if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
923 access_flags_to_mode(ppace[i]->access_req,
924 ppace[i]->type,
925 &fattr->cf_mode,
926 &group_mask);
927 if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
928 access_flags_to_mode(ppace[i]->access_req,
929 ppace[i]->type,
930 &fattr->cf_mode,
931 &other_mask);
932 if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
933 access_flags_to_mode(ppace[i]->access_req,
934 ppace[i]->type,
935 &fattr->cf_mode,
936 &other_mask);
939 /* memcpy((void *)(&(cifscred->aces[i])),
940 (void *)ppace[i],
941 sizeof(struct cifs_ace)); */
/* step to the next ACE using this ACE's on-the-wire size */
943 acl_base = (char *)ppace[i];
944 acl_size = le16_to_cpu(ppace[i]->size);
947 kfree(ppace);
950 return;
954 static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
955 struct cifs_sid *pgrpsid, __u64 nmode)
957 u16 size = 0;
958 struct cifs_acl *pnndacl;
960 pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
962 size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
963 pownersid, nmode, S_IRWXU);
964 size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
965 pgrpsid, nmode, S_IRWXG);
966 size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
967 &sid_everyone, nmode, S_IRWXO);
969 pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
970 pndacl->num_aces = cpu_to_le32(3);
972 return 0;
976 static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
978 /* BB need to add parm so we can store the SID BB */
980 /* validate that we do not go past end of ACL - sid must be at least 8
981 bytes long (assuming no sub-auths - e.g. the null SID */
982 if (end_of_acl < (char *)psid + 8) {
983 cERROR(1, "ACL too small to parse SID %p", psid);
984 return -EINVAL;
987 if (psid->num_subauth) {
988 #ifdef CONFIG_CIFS_DEBUG2
989 int i;
990 cFYI(1, "SID revision %d num_auth %d",
991 psid->revision, psid->num_subauth);
993 for (i = 0; i < psid->num_subauth; i++) {
994 cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
995 le32_to_cpu(psid->sub_auth[i]));
998 /* BB add length check to make sure that we do not have huge
999 num auths and therefore go off the end */
1000 cFYI(1, "RID 0x%x",
1001 le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
1002 #endif
1005 return 0;
1009 /* Convert CIFS ACL to POSIX form */
1010 static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
1011 struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
1013 int rc = 0;
1014 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1015 struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
1016 char *end_of_acl = ((char *)pntsd) + acl_len;
1017 __u32 dacloffset;
1019 if (pntsd == NULL)
1020 return -EIO;
1022 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1023 le32_to_cpu(pntsd->osidoffset));
1024 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1025 le32_to_cpu(pntsd->gsidoffset));
1026 dacloffset = le32_to_cpu(pntsd->dacloffset);
1027 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1028 cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1029 "sacloffset 0x%x dacloffset 0x%x",
1030 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
1031 le32_to_cpu(pntsd->gsidoffset),
1032 le32_to_cpu(pntsd->sacloffset), dacloffset);
1033 /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1034 rc = parse_sid(owner_sid_ptr, end_of_acl);
1035 if (rc) {
1036 cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
1037 return rc;
1039 rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
1040 if (rc) {
1041 cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
1042 return rc;
1045 rc = parse_sid(group_sid_ptr, end_of_acl);
1046 if (rc) {
1047 cFYI(1, "%s: Error %d mapping Owner SID to gid", __func__, rc);
1048 return rc;
1050 rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
1051 if (rc) {
1052 cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
1053 return rc;
1056 if (dacloffset)
1057 parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
1058 group_sid_ptr, fattr);
1059 else
1060 cFYI(1, "no ACL"); /* BB grant all or default perms? */
1062 return rc;
/*
 * build_sec_desc - build a new security descriptor @pnntsd from the
 * existing one @pntsd, for either a chmod (nmode != NO_CHANGE_64: new
 * DACL from the mode bits, header+SIDs copied via copy_sec_desc) or a
 * chown/chgrp (descriptor copied wholesale, then the owner and/or
 * group SID replaced via an id_to_sid() upcall).  *aclflag tells the
 * caller which part of the descriptor to send (CIFS_ACL_DACL /
 * CIFS_ACL_OWNER / CIFS_ACL_GROUP).  Returns 0 or a negative errno.
 */
1065 /* Convert permission bits from mode to equivalent CIFS ACL */
1066 static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
1067 __u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
1069 int rc = 0;
1070 __u32 dacloffset;
1071 __u32 ndacloffset;
1072 __u32 sidsoffset;
1073 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1074 struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
1075 struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
1076 struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
1078 if (nmode != NO_CHANGE_64) { /* chmod */
/* new DACL goes right after the header; SIDs follow the DACL */
1079 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1080 le32_to_cpu(pntsd->osidoffset));
1081 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1082 le32_to_cpu(pntsd->gsidoffset));
1083 dacloffset = le32_to_cpu(pntsd->dacloffset);
1084 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1085 ndacloffset = sizeof(struct cifs_ntsd);
1086 ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
1087 ndacl_ptr->revision = dacl_ptr->revision;
1088 ndacl_ptr->size = 0;
1089 ndacl_ptr->num_aces = 0;
1091 rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
1092 nmode);
1093 sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
1094 /* copy sec desc control portion & owner and group sids */
1095 copy_sec_desc(pntsd, pnntsd, sidsoffset);
1096 *aclflag = CIFS_ACL_DACL;
1097 } else {
/* chown/chgrp: keep the descriptor, swap in the new SID(s) */
1098 memcpy(pnntsd, pntsd, secdesclen);
1099 if (uid != NO_CHANGE_32) { /* chown */
1100 owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1101 le32_to_cpu(pnntsd->osidoffset));
1102 nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1103 GFP_KERNEL);
1104 if (!nowner_sid_ptr)
1105 return -ENOMEM;
1106 rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
1107 if (rc) {
1108 cFYI(1, "%s: Mapping error %d for owner id %d",
1109 __func__, rc, uid);
1110 kfree(nowner_sid_ptr);
1111 return rc;
1113 cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
1114 kfree(nowner_sid_ptr);
1115 *aclflag = CIFS_ACL_OWNER;
1117 if (gid != NO_CHANGE_32) { /* chgrp */
1118 group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1119 le32_to_cpu(pnntsd->gsidoffset));
1120 ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1121 GFP_KERNEL);
1122 if (!ngroup_sid_ptr)
1123 return -ENOMEM;
1124 rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
1125 if (rc) {
1126 cFYI(1, "%s: Mapping error %d for group id %d",
1127 __func__, rc, gid);
1128 kfree(ngroup_sid_ptr);
1129 return rc;
1131 cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
1132 kfree(ngroup_sid_ptr);
1133 *aclflag = CIFS_ACL_GROUP;
1137 return rc;
1140 static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
1141 __u16 fid, u32 *pacllen)
1143 struct cifs_ntsd *pntsd = NULL;
1144 int xid, rc;
1145 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1147 if (IS_ERR(tlink))
1148 return ERR_CAST(tlink);
1150 xid = GetXid();
1151 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
1152 FreeXid(xid);
1154 cifs_put_tlink(tlink);
1156 cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1157 if (rc)
1158 return ERR_PTR(rc);
1159 return pntsd;
1162 static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
1163 const char *path, u32 *pacllen)
1165 struct cifs_ntsd *pntsd = NULL;
1166 int oplock = 0;
1167 int xid, rc, create_options = 0;
1168 __u16 fid;
1169 struct cifs_tcon *tcon;
1170 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1172 if (IS_ERR(tlink))
1173 return ERR_CAST(tlink);
1175 tcon = tlink_tcon(tlink);
1176 xid = GetXid();
1178 if (backup_cred(cifs_sb))
1179 create_options |= CREATE_OPEN_BACKUP_INTENT;
1181 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
1182 create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1183 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1184 if (!rc) {
1185 rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
1186 CIFSSMBClose(xid, tcon, fid);
1189 cifs_put_tlink(tlink);
1190 FreeXid(xid);
1192 cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1193 if (rc)
1194 return ERR_PTR(rc);
1195 return pntsd;
1198 /* Retrieve an ACL from the server */
1199 struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
1200 struct inode *inode, const char *path,
1201 u32 *pacllen)
1203 struct cifs_ntsd *pntsd = NULL;
1204 struct cifsFileInfo *open_file = NULL;
1206 if (inode)
1207 open_file = find_readable_file(CIFS_I(inode), true);
1208 if (!open_file)
1209 return get_cifs_acl_by_path(cifs_sb, path, pacllen);
1211 pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
1212 cifsFileInfo_put(open_file);
1213 return pntsd;
1216 /* Set an ACL on the server */
1217 int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1218 struct inode *inode, const char *path, int aclflag)
1220 int oplock = 0;
1221 int xid, rc, access_flags, create_options = 0;
1222 __u16 fid;
1223 struct cifs_tcon *tcon;
1224 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1225 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1227 if (IS_ERR(tlink))
1228 return PTR_ERR(tlink);
1230 tcon = tlink_tcon(tlink);
1231 xid = GetXid();
1233 if (backup_cred(cifs_sb))
1234 create_options |= CREATE_OPEN_BACKUP_INTENT;
1236 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
1237 access_flags = WRITE_OWNER;
1238 else
1239 access_flags = WRITE_DAC;
1241 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
1242 create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1243 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1244 if (rc) {
1245 cERROR(1, "Unable to open file to set ACL");
1246 goto out;
1249 rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
1250 cFYI(DBG2, "SetCIFSACL rc = %d", rc);
1252 CIFSSMBClose(xid, tcon, fid);
1253 out:
1254 FreeXid(xid);
1255 cifs_put_tlink(tlink);
1256 return rc;
1259 /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
1261 cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1262 struct inode *inode, const char *path, const __u16 *pfid)
1264 struct cifs_ntsd *pntsd = NULL;
1265 u32 acllen = 0;
1266 int rc = 0;
1268 cFYI(DBG2, "converting ACL to mode for %s", path);
1270 if (pfid)
1271 pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
1272 else
1273 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
1275 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1276 if (IS_ERR(pntsd)) {
1277 rc = PTR_ERR(pntsd);
1278 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1279 } else {
1280 rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
1281 kfree(pntsd);
1282 if (rc)
1283 cERROR(1, "parse sec desc failed rc = %d", rc);
1286 return rc;
1289 /* Convert mode bits to an ACL so we can update the ACL on the server */
1291 id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1292 uid_t uid, gid_t gid)
1294 int rc = 0;
1295 int aclflag = CIFS_ACL_DACL; /* default flag to set */
1296 __u32 secdesclen = 0;
1297 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1298 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
1300 cFYI(DBG2, "set ACL from mode for %s", path);
1302 /* Get the security descriptor */
1303 pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
1305 /* Add three ACEs for owner, group, everyone getting rid of
1306 other ACEs as chmod disables ACEs and set the security descriptor */
1308 if (IS_ERR(pntsd)) {
1309 rc = PTR_ERR(pntsd);
1310 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1311 } else {
1312 /* allocate memory for the smb header,
1313 set security descriptor request security descriptor
1314 parameters, and secuirty descriptor itself */
1316 secdesclen = secdesclen < DEFSECDESCLEN ?
1317 DEFSECDESCLEN : secdesclen;
1318 pnntsd = kmalloc(secdesclen, GFP_KERNEL);
1319 if (!pnntsd) {
1320 cERROR(1, "Unable to allocate security descriptor");
1321 kfree(pntsd);
1322 return -ENOMEM;
1325 rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
1326 &aclflag);
1328 cFYI(DBG2, "build_sec_desc rc: %d", rc);
1330 if (!rc) {
1331 /* Set the security descriptor */
1332 rc = set_cifs_acl(pnntsd, secdesclen, inode,
1333 path, aclflag);
1334 cFYI(DBG2, "set_cifs_acl rc: %d", rc);
1337 kfree(pnntsd);
1338 kfree(pntsd);
1341 return rc;