uio: documentation fixups
[zen-stable.git] / fs / cifs / cifsacl.c
blob72ddf23ef6f7d77145dbe765732633ff8e588076
1 /*
2 * fs/cifs/cifsacl.c
4 * Copyright (C) International Business Machines Corp., 2007,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Contains the routines for mapping CIFS/NTFS ACLs
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/fs.h>
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/keyctl.h>
28 #include <linux/key-type.h>
29 #include <keys/user-type.h>
30 #include "cifspdu.h"
31 #include "cifsglob.h"
32 #include "cifsacl.h"
33 #include "cifsproto.h"
34 #include "cifs_debug.h"
36 /* security id for everyone/world system group */
37 static const struct cifs_sid sid_everyone = {
38 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
39 /* security id for Authenticated Users system group */
40 static const struct cifs_sid sid_authusers = {
41 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
42 /* group users */
43 static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
45 const struct cred *root_cred;
47 static void
48 shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
49 int *nr_del)
51 struct rb_node *node;
52 struct rb_node *tmp;
53 struct cifs_sid_id *psidid;
55 node = rb_first(root);
56 while (node) {
57 tmp = node;
58 node = rb_next(tmp);
59 psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
60 if (nr_to_scan == 0 || *nr_del == nr_to_scan)
61 ++(*nr_rem);
62 else {
63 if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
64 && psidid->refcount == 0) {
65 rb_erase(tmp, root);
66 ++(*nr_del);
67 } else
68 ++(*nr_rem);
74 * Run idmap cache shrinker.
76 static int
77 cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
79 int nr_to_scan = sc->nr_to_scan;
80 int nr_del = 0;
81 int nr_rem = 0;
82 struct rb_root *root;
84 root = &uidtree;
85 spin_lock(&siduidlock);
86 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
87 spin_unlock(&siduidlock);
89 root = &gidtree;
90 spin_lock(&sidgidlock);
91 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
92 spin_unlock(&sidgidlock);
94 root = &siduidtree;
95 spin_lock(&uidsidlock);
96 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
97 spin_unlock(&uidsidlock);
99 root = &sidgidtree;
100 spin_lock(&gidsidlock);
101 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
102 spin_unlock(&gidsidlock);
104 return nr_rem;
107 static void
108 sid_rb_insert(struct rb_root *root, unsigned long cid,
109 struct cifs_sid_id **psidid, char *typestr)
111 char *strptr;
112 struct rb_node *node = root->rb_node;
113 struct rb_node *parent = NULL;
114 struct rb_node **linkto = &(root->rb_node);
115 struct cifs_sid_id *lsidid;
117 while (node) {
118 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
119 parent = node;
120 if (cid > lsidid->id) {
121 linkto = &(node->rb_left);
122 node = node->rb_left;
124 if (cid < lsidid->id) {
125 linkto = &(node->rb_right);
126 node = node->rb_right;
130 (*psidid)->id = cid;
131 (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
132 (*psidid)->refcount = 0;
134 sprintf((*psidid)->sidstr, "%s", typestr);
135 strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
136 sprintf(strptr, "%ld", cid);
138 clear_bit(SID_ID_PENDING, &(*psidid)->state);
139 clear_bit(SID_ID_MAPPED, &(*psidid)->state);
141 rb_link_node(&(*psidid)->rbnode, parent, linkto);
142 rb_insert_color(&(*psidid)->rbnode, root);
145 static struct cifs_sid_id *
146 sid_rb_search(struct rb_root *root, unsigned long cid)
148 struct rb_node *node = root->rb_node;
149 struct cifs_sid_id *lsidid;
151 while (node) {
152 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
153 if (cid > lsidid->id)
154 node = node->rb_left;
155 else if (cid < lsidid->id)
156 node = node->rb_right;
157 else /* node found */
158 return lsidid;
161 return NULL;
164 static struct shrinker cifs_shrinker = {
165 .shrink = cifs_idmap_shrinker,
166 .seeks = DEFAULT_SEEKS,
169 static int
170 cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
172 char *payload;
174 payload = kmalloc(datalen, GFP_KERNEL);
175 if (!payload)
176 return -ENOMEM;
178 memcpy(payload, data, datalen);
179 key->payload.data = payload;
180 key->datalen = datalen;
181 return 0;
184 static inline void
185 cifs_idmap_key_destroy(struct key *key)
187 kfree(key->payload.data);
190 struct key_type cifs_idmap_key_type = {
191 .name = "cifs.idmap",
192 .instantiate = cifs_idmap_key_instantiate,
193 .destroy = cifs_idmap_key_destroy,
194 .describe = user_describe,
195 .match = user_match,
198 static void
199 sid_to_str(struct cifs_sid *sidptr, char *sidstr)
201 int i;
202 unsigned long saval;
203 char *strptr;
205 strptr = sidstr;
207 sprintf(strptr, "%s", "S");
208 strptr = sidstr + strlen(sidstr);
210 sprintf(strptr, "-%d", sidptr->revision);
211 strptr = sidstr + strlen(sidstr);
213 for (i = 0; i < 6; ++i) {
214 if (sidptr->authority[i]) {
215 sprintf(strptr, "-%d", sidptr->authority[i]);
216 strptr = sidstr + strlen(sidstr);
220 for (i = 0; i < sidptr->num_subauth; ++i) {
221 saval = le32_to_cpu(sidptr->sub_auth[i]);
222 sprintf(strptr, "-%ld", saval);
223 strptr = sidstr + strlen(sidstr);
227 static void
228 id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
229 struct cifs_sid_id **psidid, char *typestr)
231 int rc;
232 char *strptr;
233 struct rb_node *node = root->rb_node;
234 struct rb_node *parent = NULL;
235 struct rb_node **linkto = &(root->rb_node);
236 struct cifs_sid_id *lsidid;
238 while (node) {
239 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
240 parent = node;
241 rc = compare_sids(sidptr, &((lsidid)->sid));
242 if (rc > 0) {
243 linkto = &(node->rb_left);
244 node = node->rb_left;
245 } else if (rc < 0) {
246 linkto = &(node->rb_right);
247 node = node->rb_right;
251 memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
252 (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
253 (*psidid)->refcount = 0;
255 sprintf((*psidid)->sidstr, "%s", typestr);
256 strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
257 sid_to_str(&(*psidid)->sid, strptr);
259 clear_bit(SID_ID_PENDING, &(*psidid)->state);
260 clear_bit(SID_ID_MAPPED, &(*psidid)->state);
262 rb_link_node(&(*psidid)->rbnode, parent, linkto);
263 rb_insert_color(&(*psidid)->rbnode, root);
266 static struct cifs_sid_id *
267 id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
269 int rc;
270 struct rb_node *node = root->rb_node;
271 struct cifs_sid_id *lsidid;
273 while (node) {
274 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
275 rc = compare_sids(sidptr, &((lsidid)->sid));
276 if (rc > 0) {
277 node = node->rb_left;
278 } else if (rc < 0) {
279 node = node->rb_right;
280 } else /* node found */
281 return lsidid;
284 return NULL;
287 static int
288 sidid_pending_wait(void *unused)
290 schedule();
291 return signal_pending(current) ? -ERESTARTSYS : 0;
294 static int
295 id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
297 int rc = 0;
298 struct key *sidkey;
299 const struct cred *saved_cred;
300 struct cifs_sid *lsid;
301 struct cifs_sid_id *psidid, *npsidid;
302 struct rb_root *cidtree;
303 spinlock_t *cidlock;
305 if (sidtype == SIDOWNER) {
306 cidlock = &siduidlock;
307 cidtree = &uidtree;
308 } else if (sidtype == SIDGROUP) {
309 cidlock = &sidgidlock;
310 cidtree = &gidtree;
311 } else
312 return -EINVAL;
314 spin_lock(cidlock);
315 psidid = sid_rb_search(cidtree, cid);
317 if (!psidid) { /* node does not exist, allocate one & attempt adding */
318 spin_unlock(cidlock);
319 npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
320 if (!npsidid)
321 return -ENOMEM;
323 npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
324 if (!npsidid->sidstr) {
325 kfree(npsidid);
326 return -ENOMEM;
329 spin_lock(cidlock);
330 psidid = sid_rb_search(cidtree, cid);
331 if (psidid) { /* node happened to get inserted meanwhile */
332 ++psidid->refcount;
333 spin_unlock(cidlock);
334 kfree(npsidid->sidstr);
335 kfree(npsidid);
336 } else {
337 psidid = npsidid;
338 sid_rb_insert(cidtree, cid, &psidid,
339 sidtype == SIDOWNER ? "oi:" : "gi:");
340 ++psidid->refcount;
341 spin_unlock(cidlock);
343 } else {
344 ++psidid->refcount;
345 spin_unlock(cidlock);
349 * If we are here, it is safe to access psidid and its fields
350 * since a reference was taken earlier while holding the spinlock.
351 * A reference on the node is put without holding the spinlock
352 * and it is OK to do so in this case, shrinker will not erase
353 * this node until all references are put and we do not access
354 * any fields of the node after a reference is put .
356 if (test_bit(SID_ID_MAPPED, &psidid->state)) {
357 memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
358 psidid->time = jiffies; /* update ts for accessing */
359 goto id_sid_out;
362 if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
363 rc = -EINVAL;
364 goto id_sid_out;
367 if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
368 saved_cred = override_creds(root_cred);
369 sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
370 if (IS_ERR(sidkey)) {
371 rc = -EINVAL;
372 cFYI(1, "%s: Can't map and id to a SID", __func__);
373 } else {
374 lsid = (struct cifs_sid *)sidkey->payload.data;
375 memcpy(&psidid->sid, lsid,
376 sidkey->datalen < sizeof(struct cifs_sid) ?
377 sidkey->datalen : sizeof(struct cifs_sid));
378 memcpy(ssid, &psidid->sid,
379 sidkey->datalen < sizeof(struct cifs_sid) ?
380 sidkey->datalen : sizeof(struct cifs_sid));
381 set_bit(SID_ID_MAPPED, &psidid->state);
382 key_put(sidkey);
383 kfree(psidid->sidstr);
385 psidid->time = jiffies; /* update ts for accessing */
386 revert_creds(saved_cred);
387 clear_bit(SID_ID_PENDING, &psidid->state);
388 wake_up_bit(&psidid->state, SID_ID_PENDING);
389 } else {
390 rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
391 sidid_pending_wait, TASK_INTERRUPTIBLE);
392 if (rc) {
393 cFYI(1, "%s: sidid_pending_wait interrupted %d",
394 __func__, rc);
395 --psidid->refcount;
396 return rc;
398 if (test_bit(SID_ID_MAPPED, &psidid->state))
399 memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
400 else
401 rc = -EINVAL;
403 id_sid_out:
404 --psidid->refcount;
405 return rc;
408 static int
409 sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
410 struct cifs_fattr *fattr, uint sidtype)
412 int rc;
413 unsigned long cid;
414 struct key *idkey;
415 const struct cred *saved_cred;
416 struct cifs_sid_id *psidid, *npsidid;
417 struct rb_root *cidtree;
418 spinlock_t *cidlock;
420 if (sidtype == SIDOWNER) {
421 cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
422 cidlock = &siduidlock;
423 cidtree = &uidtree;
424 } else if (sidtype == SIDGROUP) {
425 cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
426 cidlock = &sidgidlock;
427 cidtree = &gidtree;
428 } else
429 return -ENOENT;
431 spin_lock(cidlock);
432 psidid = id_rb_search(cidtree, psid);
434 if (!psidid) { /* node does not exist, allocate one & attempt adding */
435 spin_unlock(cidlock);
436 npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
437 if (!npsidid)
438 return -ENOMEM;
440 npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
441 if (!npsidid->sidstr) {
442 kfree(npsidid);
443 return -ENOMEM;
446 spin_lock(cidlock);
447 psidid = id_rb_search(cidtree, psid);
448 if (psidid) { /* node happened to get inserted meanwhile */
449 ++psidid->refcount;
450 spin_unlock(cidlock);
451 kfree(npsidid->sidstr);
452 kfree(npsidid);
453 } else {
454 psidid = npsidid;
455 id_rb_insert(cidtree, psid, &psidid,
456 sidtype == SIDOWNER ? "os:" : "gs:");
457 ++psidid->refcount;
458 spin_unlock(cidlock);
460 } else {
461 ++psidid->refcount;
462 spin_unlock(cidlock);
466 * If we are here, it is safe to access psidid and its fields
467 * since a reference was taken earlier while holding the spinlock.
468 * A reference on the node is put without holding the spinlock
469 * and it is OK to do so in this case, shrinker will not erase
470 * this node until all references are put and we do not access
471 * any fields of the node after a reference is put .
473 if (test_bit(SID_ID_MAPPED, &psidid->state)) {
474 cid = psidid->id;
475 psidid->time = jiffies; /* update ts for accessing */
476 goto sid_to_id_out;
479 if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
480 goto sid_to_id_out;
482 if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
483 saved_cred = override_creds(root_cred);
484 idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
485 if (IS_ERR(idkey))
486 cFYI(1, "%s: Can't map SID to an id", __func__);
487 else {
488 cid = *(unsigned long *)idkey->payload.value;
489 psidid->id = cid;
490 set_bit(SID_ID_MAPPED, &psidid->state);
491 key_put(idkey);
492 kfree(psidid->sidstr);
494 revert_creds(saved_cred);
495 psidid->time = jiffies; /* update ts for accessing */
496 clear_bit(SID_ID_PENDING, &psidid->state);
497 wake_up_bit(&psidid->state, SID_ID_PENDING);
498 } else {
499 rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
500 sidid_pending_wait, TASK_INTERRUPTIBLE);
501 if (rc) {
502 cFYI(1, "%s: sidid_pending_wait interrupted %d",
503 __func__, rc);
504 --psidid->refcount; /* decremented without spinlock */
505 return rc;
507 if (test_bit(SID_ID_MAPPED, &psidid->state))
508 cid = psidid->id;
511 sid_to_id_out:
512 --psidid->refcount; /* decremented without spinlock */
513 if (sidtype == SIDOWNER)
514 fattr->cf_uid = cid;
515 else
516 fattr->cf_gid = cid;
518 return 0;
522 init_cifs_idmap(void)
524 struct cred *cred;
525 struct key *keyring;
526 int ret;
528 cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);
530 /* create an override credential set with a special thread keyring in
531 * which requests are cached
533 * this is used to prevent malicious redirections from being installed
534 * with add_key().
536 cred = prepare_kernel_cred(NULL);
537 if (!cred)
538 return -ENOMEM;
540 keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
541 (KEY_POS_ALL & ~KEY_POS_SETATTR) |
542 KEY_USR_VIEW | KEY_USR_READ,
543 KEY_ALLOC_NOT_IN_QUOTA);
544 if (IS_ERR(keyring)) {
545 ret = PTR_ERR(keyring);
546 goto failed_put_cred;
549 ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
550 if (ret < 0)
551 goto failed_put_key;
553 ret = register_key_type(&cifs_idmap_key_type);
554 if (ret < 0)
555 goto failed_put_key;
557 /* instruct request_key() to use this special keyring as a cache for
558 * the results it looks up */
559 cred->thread_keyring = keyring;
560 cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
561 root_cred = cred;
563 spin_lock_init(&siduidlock);
564 uidtree = RB_ROOT;
565 spin_lock_init(&sidgidlock);
566 gidtree = RB_ROOT;
568 spin_lock_init(&uidsidlock);
569 siduidtree = RB_ROOT;
570 spin_lock_init(&gidsidlock);
571 sidgidtree = RB_ROOT;
572 register_shrinker(&cifs_shrinker);
574 cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
575 return 0;
577 failed_put_key:
578 key_put(keyring);
579 failed_put_cred:
580 put_cred(cred);
581 return ret;
584 void
585 exit_cifs_idmap(void)
587 key_revoke(root_cred->thread_keyring);
588 unregister_key_type(&cifs_idmap_key_type);
589 put_cred(root_cred);
590 unregister_shrinker(&cifs_shrinker);
591 cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
594 void
595 cifs_destroy_idmaptrees(void)
597 struct rb_root *root;
598 struct rb_node *node;
600 root = &uidtree;
601 spin_lock(&siduidlock);
602 while ((node = rb_first(root)))
603 rb_erase(node, root);
604 spin_unlock(&siduidlock);
606 root = &gidtree;
607 spin_lock(&sidgidlock);
608 while ((node = rb_first(root)))
609 rb_erase(node, root);
610 spin_unlock(&sidgidlock);
612 root = &siduidtree;
613 spin_lock(&uidsidlock);
614 while ((node = rb_first(root)))
615 rb_erase(node, root);
616 spin_unlock(&uidsidlock);
618 root = &sidgidtree;
619 spin_lock(&gidsidlock);
620 while ((node = rb_first(root)))
621 rb_erase(node, root);
622 spin_unlock(&gidsidlock);
625 /* if the two SIDs (roughly equivalent to a UUID for a user or group) are
626 the same returns 1, if they do not match returns 0 */
627 int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
629 int i;
630 int num_subauth, num_sat, num_saw;
632 if ((!ctsid) || (!cwsid))
633 return 1;
635 /* compare the revision */
636 if (ctsid->revision != cwsid->revision) {
637 if (ctsid->revision > cwsid->revision)
638 return 1;
639 else
640 return -1;
643 /* compare all of the six auth values */
644 for (i = 0; i < 6; ++i) {
645 if (ctsid->authority[i] != cwsid->authority[i]) {
646 if (ctsid->authority[i] > cwsid->authority[i])
647 return 1;
648 else
649 return -1;
653 /* compare all of the subauth values if any */
654 num_sat = ctsid->num_subauth;
655 num_saw = cwsid->num_subauth;
656 num_subauth = num_sat < num_saw ? num_sat : num_saw;
657 if (num_subauth) {
658 for (i = 0; i < num_subauth; ++i) {
659 if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
660 if (le32_to_cpu(ctsid->sub_auth[i]) >
661 le32_to_cpu(cwsid->sub_auth[i]))
662 return 1;
663 else
664 return -1;
669 return 0; /* sids compare/match */
673 /* copy ntsd, owner sid, and group sid from a security descriptor to another */
674 static void copy_sec_desc(const struct cifs_ntsd *pntsd,
675 struct cifs_ntsd *pnntsd, __u32 sidsoffset)
677 int i;
679 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
680 struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
682 /* copy security descriptor control portion */
683 pnntsd->revision = pntsd->revision;
684 pnntsd->type = pntsd->type;
685 pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
686 pnntsd->sacloffset = 0;
687 pnntsd->osidoffset = cpu_to_le32(sidsoffset);
688 pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
690 /* copy owner sid */
691 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
692 le32_to_cpu(pntsd->osidoffset));
693 nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
695 nowner_sid_ptr->revision = owner_sid_ptr->revision;
696 nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
697 for (i = 0; i < 6; i++)
698 nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
699 for (i = 0; i < 5; i++)
700 nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];
702 /* copy group sid */
703 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
704 le32_to_cpu(pntsd->gsidoffset));
705 ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
706 sizeof(struct cifs_sid));
708 ngroup_sid_ptr->revision = group_sid_ptr->revision;
709 ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
710 for (i = 0; i < 6; i++)
711 ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
712 for (i = 0; i < 5; i++)
713 ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];
715 return;
720 change posix mode to reflect permissions
721 pmode is the existing mode (we only want to overwrite part of this
722 bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
724 static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
725 umode_t *pbits_to_set)
727 __u32 flags = le32_to_cpu(ace_flags);
728 /* the order of ACEs is important. The canonical order is to begin with
729 DENY entries followed by ALLOW, otherwise an allow entry could be
730 encountered first, making the subsequent deny entry like "dead code"
731 which would be superflous since Windows stops when a match is made
732 for the operation you are trying to perform for your user */
734 /* For deny ACEs we change the mask so that subsequent allow access
735 control entries do not turn on the bits we are denying */
736 if (type == ACCESS_DENIED) {
737 if (flags & GENERIC_ALL)
738 *pbits_to_set &= ~S_IRWXUGO;
740 if ((flags & GENERIC_WRITE) ||
741 ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
742 *pbits_to_set &= ~S_IWUGO;
743 if ((flags & GENERIC_READ) ||
744 ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
745 *pbits_to_set &= ~S_IRUGO;
746 if ((flags & GENERIC_EXECUTE) ||
747 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
748 *pbits_to_set &= ~S_IXUGO;
749 return;
750 } else if (type != ACCESS_ALLOWED) {
751 cERROR(1, "unknown access control type %d", type);
752 return;
754 /* else ACCESS_ALLOWED type */
756 if (flags & GENERIC_ALL) {
757 *pmode |= (S_IRWXUGO & (*pbits_to_set));
758 cFYI(DBG2, "all perms");
759 return;
761 if ((flags & GENERIC_WRITE) ||
762 ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
763 *pmode |= (S_IWUGO & (*pbits_to_set));
764 if ((flags & GENERIC_READ) ||
765 ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
766 *pmode |= (S_IRUGO & (*pbits_to_set));
767 if ((flags & GENERIC_EXECUTE) ||
768 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
769 *pmode |= (S_IXUGO & (*pbits_to_set));
771 cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
772 return;
776 Generate access flags to reflect permissions mode is the existing mode.
777 This function is called for every ACE in the DACL whose SID matches
778 with either owner or group or everyone.
781 static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
782 __u32 *pace_flags)
784 /* reset access mask */
785 *pace_flags = 0x0;
787 /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
788 mode &= bits_to_use;
790 /* check for R/W/X UGO since we do not know whose flags
791 is this but we have cleared all the bits sans RWX for
792 either user or group or other as per bits_to_use */
793 if (mode & S_IRUGO)
794 *pace_flags |= SET_FILE_READ_RIGHTS;
795 if (mode & S_IWUGO)
796 *pace_flags |= SET_FILE_WRITE_RIGHTS;
797 if (mode & S_IXUGO)
798 *pace_flags |= SET_FILE_EXEC_RIGHTS;
800 cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
801 return;
804 static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
805 const struct cifs_sid *psid, __u64 nmode, umode_t bits)
807 int i;
808 __u16 size = 0;
809 __u32 access_req = 0;
811 pntace->type = ACCESS_ALLOWED;
812 pntace->flags = 0x0;
813 mode_to_access_flags(nmode, bits, &access_req);
814 if (!access_req)
815 access_req = SET_MINIMUM_RIGHTS;
816 pntace->access_req = cpu_to_le32(access_req);
818 pntace->sid.revision = psid->revision;
819 pntace->sid.num_subauth = psid->num_subauth;
820 for (i = 0; i < 6; i++)
821 pntace->sid.authority[i] = psid->authority[i];
822 for (i = 0; i < psid->num_subauth; i++)
823 pntace->sid.sub_auth[i] = psid->sub_auth[i];
825 size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
826 pntace->size = cpu_to_le16(size);
828 return size;
832 #ifdef CONFIG_CIFS_DEBUG2
833 static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
835 int num_subauth;
837 /* validate that we do not go past end of acl */
839 if (le16_to_cpu(pace->size) < 16) {
840 cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
841 return;
844 if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
845 cERROR(1, "ACL too small to parse ACE");
846 return;
849 num_subauth = pace->sid.num_subauth;
850 if (num_subauth) {
851 int i;
852 cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
853 pace->sid.revision, pace->sid.num_subauth, pace->type,
854 pace->flags, le16_to_cpu(pace->size));
855 for (i = 0; i < num_subauth; ++i) {
856 cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
857 le32_to_cpu(pace->sid.sub_auth[i]));
860 /* BB add length check to make sure that we do not have huge
861 num auths and therefore go off the end */
864 return;
866 #endif
869 static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
870 struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
871 struct cifs_fattr *fattr)
873 int i;
874 int num_aces = 0;
875 int acl_size;
876 char *acl_base;
877 struct cifs_ace **ppace;
879 /* BB need to add parm so we can store the SID BB */
881 if (!pdacl) {
882 /* no DACL in the security descriptor, set
883 all the permissions for user/group/other */
884 fattr->cf_mode |= S_IRWXUGO;
885 return;
888 /* validate that we do not go past end of acl */
889 if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
890 cERROR(1, "ACL too small to parse DACL");
891 return;
894 cFYI(DBG2, "DACL revision %d size %d num aces %d",
895 le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
896 le32_to_cpu(pdacl->num_aces));
898 /* reset rwx permissions for user/group/other.
899 Also, if num_aces is 0 i.e. DACL has no ACEs,
900 user/group/other have no permissions */
901 fattr->cf_mode &= ~(S_IRWXUGO);
903 acl_base = (char *)pdacl;
904 acl_size = sizeof(struct cifs_acl);
906 num_aces = le32_to_cpu(pdacl->num_aces);
907 if (num_aces > 0) {
908 umode_t user_mask = S_IRWXU;
909 umode_t group_mask = S_IRWXG;
910 umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
912 ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
913 GFP_KERNEL);
914 if (!ppace) {
915 cERROR(1, "DACL memory allocation error");
916 return;
919 for (i = 0; i < num_aces; ++i) {
920 ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
921 #ifdef CONFIG_CIFS_DEBUG2
922 dump_ace(ppace[i], end_of_acl);
923 #endif
924 if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
925 access_flags_to_mode(ppace[i]->access_req,
926 ppace[i]->type,
927 &fattr->cf_mode,
928 &user_mask);
929 if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
930 access_flags_to_mode(ppace[i]->access_req,
931 ppace[i]->type,
932 &fattr->cf_mode,
933 &group_mask);
934 if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
935 access_flags_to_mode(ppace[i]->access_req,
936 ppace[i]->type,
937 &fattr->cf_mode,
938 &other_mask);
939 if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
940 access_flags_to_mode(ppace[i]->access_req,
941 ppace[i]->type,
942 &fattr->cf_mode,
943 &other_mask);
946 /* memcpy((void *)(&(cifscred->aces[i])),
947 (void *)ppace[i],
948 sizeof(struct cifs_ace)); */
950 acl_base = (char *)ppace[i];
951 acl_size = le16_to_cpu(ppace[i]->size);
954 kfree(ppace);
957 return;
961 static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
962 struct cifs_sid *pgrpsid, __u64 nmode)
964 u16 size = 0;
965 struct cifs_acl *pnndacl;
967 pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
969 size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
970 pownersid, nmode, S_IRWXU);
971 size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
972 pgrpsid, nmode, S_IRWXG);
973 size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
974 &sid_everyone, nmode, S_IRWXO);
976 pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
977 pndacl->num_aces = cpu_to_le32(3);
979 return 0;
983 static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
985 /* BB need to add parm so we can store the SID BB */
987 /* validate that we do not go past end of ACL - sid must be at least 8
988 bytes long (assuming no sub-auths - e.g. the null SID */
989 if (end_of_acl < (char *)psid + 8) {
990 cERROR(1, "ACL too small to parse SID %p", psid);
991 return -EINVAL;
994 if (psid->num_subauth) {
995 #ifdef CONFIG_CIFS_DEBUG2
996 int i;
997 cFYI(1, "SID revision %d num_auth %d",
998 psid->revision, psid->num_subauth);
1000 for (i = 0; i < psid->num_subauth; i++) {
1001 cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
1002 le32_to_cpu(psid->sub_auth[i]));
1005 /* BB add length check to make sure that we do not have huge
1006 num auths and therefore go off the end */
1007 cFYI(1, "RID 0x%x",
1008 le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
1009 #endif
1012 return 0;
1016 /* Convert CIFS ACL to POSIX form */
1017 static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
1018 struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
1020 int rc = 0;
1021 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1022 struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
1023 char *end_of_acl = ((char *)pntsd) + acl_len;
1024 __u32 dacloffset;
1026 if (pntsd == NULL)
1027 return -EIO;
1029 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1030 le32_to_cpu(pntsd->osidoffset));
1031 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1032 le32_to_cpu(pntsd->gsidoffset));
1033 dacloffset = le32_to_cpu(pntsd->dacloffset);
1034 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1035 cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1036 "sacloffset 0x%x dacloffset 0x%x",
1037 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
1038 le32_to_cpu(pntsd->gsidoffset),
1039 le32_to_cpu(pntsd->sacloffset), dacloffset);
1040 /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1041 rc = parse_sid(owner_sid_ptr, end_of_acl);
1042 if (rc) {
1043 cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
1044 return rc;
1046 rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
1047 if (rc) {
1048 cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
1049 return rc;
1052 rc = parse_sid(group_sid_ptr, end_of_acl);
1053 if (rc) {
1054 cFYI(1, "%s: Error %d mapping Owner SID to gid", __func__, rc);
1055 return rc;
1057 rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
1058 if (rc) {
1059 cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
1060 return rc;
1063 if (dacloffset)
1064 parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
1065 group_sid_ptr, fattr);
1066 else
1067 cFYI(1, "no ACL"); /* BB grant all or default perms? */
1069 return rc;
1072 /* Convert permission bits from mode to equivalent CIFS ACL */
/*
 * build_sec_desc - build the modified security descriptor to send back
 * @pntsd:	security descriptor as fetched from the server
 * @pnntsd:	preallocated buffer that receives the modified descriptor
 * @secdesclen:	buffer length (used for the verbatim copy on chown/chgrp)
 * @nmode:	new mode bits, or NO_CHANGE_64 when this is not a chmod
 * @uid:	new owner id, or NO_CHANGE_32 when the owner is unchanged
 * @gid:	new group id, or NO_CHANGE_32 when the group is unchanged
 * @aclflag:	out: which portion of the descriptor the caller must send
 *		(CIFS_ACL_DACL, CIFS_ACL_OWNER or CIFS_ACL_GROUP)
 *
 * chmod: rebuild the DACL in @pnntsd from @nmode via set_chmod_dacl(),
 * then copy the control fields and owner/group SIDs in after it.
 * chown/chgrp: copy the descriptor verbatim and overwrite the owner
 * and/or group SID in place with the SID mapped from @uid/@gid.
 * Returns 0 on success or a negative error code (-ENOMEM, or a SID
 * mapping error from id_to_sid()).
 */
1073 static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
1074 __u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
1076 int rc = 0;
1077 __u32 dacloffset;
1078 __u32 ndacloffset;
1079 __u32 sidsoffset;
1080 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1081 struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
1082 struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
1083 struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
1085 if (nmode != NO_CHANGE_64) { /* chmod */
1086 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1087 le32_to_cpu(pntsd->osidoffset));
1088 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1089 le32_to_cpu(pntsd->gsidoffset));
1090 dacloffset = le32_to_cpu(pntsd->dacloffset);
1091 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
/* the rebuilt DACL lands immediately after the fixed-size NTSD header */
1092 ndacloffset = sizeof(struct cifs_ntsd);
1093 ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
1094 ndacl_ptr->revision = dacl_ptr->revision;
1095 ndacl_ptr->size = 0;
1096 ndacl_ptr->num_aces = 0;
1098 rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
1099 nmode);
/* owner/group SIDs are placed right after the freshly built DACL */
1100 sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
1101 /* copy sec desc control portion & owner and group sids */
1102 copy_sec_desc(pntsd, pnntsd, sidsoffset);
1103 *aclflag = CIFS_ACL_DACL;
1104 } else {
/* chown/chgrp: start from a verbatim copy and patch the SID(s) in place */
1105 memcpy(pnntsd, pntsd, secdesclen);
1106 if (uid != NO_CHANGE_32) { /* chown */
1107 owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1108 le32_to_cpu(pnntsd->osidoffset));
1109 nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1110 GFP_KERNEL);
1111 if (!nowner_sid_ptr)
1112 return -ENOMEM;
/* map the POSIX uid to a SID; fails if no mapping is available */
1113 rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
1114 if (rc) {
1115 cFYI(1, "%s: Mapping error %d for owner id %d",
1116 __func__, rc, uid);
1117 kfree(nowner_sid_ptr);
1118 return rc;
1120 memcpy(owner_sid_ptr, nowner_sid_ptr,
1121 sizeof(struct cifs_sid));
1122 kfree(nowner_sid_ptr);
1123 *aclflag = CIFS_ACL_OWNER;
1125 if (gid != NO_CHANGE_32) { /* chgrp */
1126 group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1127 le32_to_cpu(pnntsd->gsidoffset));
1128 ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1129 GFP_KERNEL);
1130 if (!ngroup_sid_ptr)
1131 return -ENOMEM;
1132 rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
1133 if (rc) {
1134 cFYI(1, "%s: Mapping error %d for group id %d",
1135 __func__, rc, gid);
1136 kfree(ngroup_sid_ptr);
1137 return rc;
1139 memcpy(group_sid_ptr, ngroup_sid_ptr,
1140 sizeof(struct cifs_sid));
1141 kfree(ngroup_sid_ptr);
1142 *aclflag = CIFS_ACL_GROUP;
1146 return rc;
1149 static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
1150 __u16 fid, u32 *pacllen)
1152 struct cifs_ntsd *pntsd = NULL;
1153 int xid, rc;
1154 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1156 if (IS_ERR(tlink))
1157 return ERR_CAST(tlink);
1159 xid = GetXid();
1160 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
1161 FreeXid(xid);
1163 cifs_put_tlink(tlink);
1165 cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1166 if (rc)
1167 return ERR_PTR(rc);
1168 return pntsd;
1171 static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
1172 const char *path, u32 *pacllen)
1174 struct cifs_ntsd *pntsd = NULL;
1175 int oplock = 0;
1176 int xid, rc, create_options = 0;
1177 __u16 fid;
1178 struct cifs_tcon *tcon;
1179 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1181 if (IS_ERR(tlink))
1182 return ERR_CAST(tlink);
1184 tcon = tlink_tcon(tlink);
1185 xid = GetXid();
1187 if (backup_cred(cifs_sb))
1188 create_options |= CREATE_OPEN_BACKUP_INTENT;
1190 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
1191 create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1192 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1193 if (!rc) {
1194 rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
1195 CIFSSMBClose(xid, tcon, fid);
1198 cifs_put_tlink(tlink);
1199 FreeXid(xid);
1201 cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1202 if (rc)
1203 return ERR_PTR(rc);
1204 return pntsd;
1207 /* Retrieve an ACL from the server */
1208 struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
1209 struct inode *inode, const char *path,
1210 u32 *pacllen)
1212 struct cifs_ntsd *pntsd = NULL;
1213 struct cifsFileInfo *open_file = NULL;
1215 if (inode)
1216 open_file = find_readable_file(CIFS_I(inode), true);
1217 if (!open_file)
1218 return get_cifs_acl_by_path(cifs_sb, path, pacllen);
1220 pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
1221 cifsFileInfo_put(open_file);
1222 return pntsd;
1225 /* Set an ACL on the server */
1226 int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1227 struct inode *inode, const char *path, int aclflag)
1229 int oplock = 0;
1230 int xid, rc, access_flags, create_options = 0;
1231 __u16 fid;
1232 struct cifs_tcon *tcon;
1233 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1234 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1236 if (IS_ERR(tlink))
1237 return PTR_ERR(tlink);
1239 tcon = tlink_tcon(tlink);
1240 xid = GetXid();
1242 if (backup_cred(cifs_sb))
1243 create_options |= CREATE_OPEN_BACKUP_INTENT;
1245 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
1246 access_flags = WRITE_OWNER;
1247 else
1248 access_flags = WRITE_DAC;
1250 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
1251 create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1252 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1253 if (rc) {
1254 cERROR(1, "Unable to open file to set ACL");
1255 goto out;
1258 rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
1259 cFYI(DBG2, "SetCIFSACL rc = %d", rc);
1261 CIFSSMBClose(xid, tcon, fid);
1262 out:
1263 FreeXid(xid);
1264 cifs_put_tlink(tlink);
1265 return rc;
1268 /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
/*
 * cifs_acl_to_fattr - fetch a file's security descriptor and fold the
 * ownership/mode it encodes into @fattr via parse_sec_desc().
 * @pfid:  optional open file handle; when non-NULL the descriptor is
 *         fetched over that handle, otherwise by @path.
 * Returns 0 on success, or the negative error from the fetch or parse.
 */
1270 cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1271 struct inode *inode, const char *path, const __u16 *pfid)
1273 struct cifs_ntsd *pntsd = NULL;
1274 u32 acllen = 0;
1275 int rc = 0;
1277 cFYI(DBG2, "converting ACL to mode for %s", path);
1279 if (pfid)
1280 pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
1281 else
1282 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
1284 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1285 if (IS_ERR(pntsd)) {
1286 rc = PTR_ERR(pntsd);
1287 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1288 } else {
1289 rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
/* descriptor buffer was allocated by the fetch helpers; we own it here */
1290 kfree(pntsd);
1291 if (rc)
1292 cERROR(1, "parse sec desc failed rc = %d", rc);
1295 return rc;
1298 /* Convert mode bits to an ACL so we can update the ACL on the server */
/*
 * id_mode_to_cifs_acl - apply a chmod/chown/chgrp to the server-side ACL
 * @nmode: new mode bits, or NO_CHANGE_64 when the mode is not changing
 * @uid:   new owner id, or NO_CHANGE_32 when the owner is unchanged
 * @gid:   new group id, or NO_CHANGE_32 when the group is unchanged
 *
 * Fetches the current security descriptor, builds a modified copy with
 * build_sec_desc(), and pushes the relevant portion (aclflag) back with
 * set_cifs_acl().  Returns 0 on success or a negative error code.
 */
1300 id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1301 uid_t uid, gid_t gid)
1303 int rc = 0;
1304 int aclflag = CIFS_ACL_DACL; /* default flag to set */
1305 __u32 secdesclen = 0;
1306 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1307 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
1309 cFYI(DBG2, "set ACL from mode for %s", path);
1311 /* Get the security descriptor */
1312 pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
1314 /* Add three ACEs for owner, group, everyone getting rid of
1315 other ACEs as chmod disables ACEs and set the security descriptor */
1317 if (IS_ERR(pntsd)) {
1318 rc = PTR_ERR(pntsd);
1319 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1320 } else {
1321 /* allocate memory for the smb header,
1322 set security descriptor request security descriptor
1323 parameters, and security descriptor itself */
/* ensure the new-descriptor buffer is at least the default size */
1325 secdesclen = secdesclen < DEFSECDESCLEN ?
1326 DEFSECDESCLEN : secdesclen;
1327 pnntsd = kmalloc(secdesclen, GFP_KERNEL);
1328 if (!pnntsd) {
1329 cERROR(1, "Unable to allocate security descriptor");
1330 kfree(pntsd);
1331 return -ENOMEM;
1334 rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
1335 &aclflag);
1337 cFYI(DBG2, "build_sec_desc rc: %d", rc);
1339 if (!rc) {
1340 /* Set the security descriptor */
1341 rc = set_cifs_acl(pnntsd, secdesclen, inode,
1342 path, aclflag);
1343 cFYI(DBG2, "set_cifs_acl rc: %d", rc);
/* both the fetched and the rebuilt descriptors are owned here */
1346 kfree(pnntsd);
1347 kfree(pntsd);
1350 return rc;