/* net/sunrpc/svcauth_unix.c */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <net/sock.h>
#define RPCDBG_FACILITY	RPCDBG_AUTH


/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2).  i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */

struct unix_domain {
	struct auth_domain	h;
	int	addr_changes;
	/* other stuff later */
};

extern struct auth_ops svcauth_unix;
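
/*
 * Find the auth_domain for @name with the AUTH_UNIX flavour, creating
 * it if it does not exist yet.  The loop handles a race with another
 * creator: if auth_domain_lookup() hands back somebody else's entry,
 * our tentative allocation is dropped and that entry is returned
 * instead (or NULL if it turns out to be of a different flavour).
 */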
struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_lookup(name, NULL);
	while(1) {
		if (rv) {
			if (new && rv != &new->h)
				auth_domain_put(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		new->addr_changes = 0;
		rv = auth_domain_lookup(name, &new->h);
	}
}
EXPORT_SYMBOL(unix_domain_find);

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}

/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define	IP_HASHBITS	8
#define	IP_HASHMAX	(1<<IP_HASHBITS)
#define	IP_HASHMASK	(IP_HASHMAX-1)
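
/*
 * One cache entry per (class, IP address) pair, mapping to the
 * unix_domain (client) that currently owns the address.  m_add_change
 * records the domain's addr_changes counter at the time the mapping
 * was made, so later address changes can invalidate the entry
 * (see auth_unix_lookup() below).
 */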
struct ip_map {
	struct cache_head	h;
	char			m_class[8]; /* e.g. "nfsd" */
	struct in_addr		m_addr;
	struct unix_domain	*m_client;
	int			m_add_change;
};
static struct cache_head	*ip_table[IP_HASHMAX];

static void ip_map_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct ip_map *im = container_of(item, struct ip_map, h);

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		auth_domain_put(&im->m_client->h);
	kfree(im);
}

#if IP_HASHBITS == 8
/* hash_long on a 64 bit machine is currently REALLY BAD for
 * IP addresses in reverse-endian (i.e. on a little-endian machine).
 * So use a trivial but reliable hash instead
 */
static inline int hash_ip(__be32 ip)
{
	int hash = (__force u32)ip ^ ((__force u32)ip>>16);
	return (hash ^ (hash>>8)) & 0xff;
}
#endif
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *orig = container_of(corig, struct ip_map, h);
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	return strcmp(orig->m_class, new->m_class) == 0
		&& orig->m_addr.s_addr == new->m_addr.s_addr;
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	strcpy(new->m_class, item->m_class);
	new->m_addr.s_addr = item->m_addr.s_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	kref_get(&item->m_client->h.ref);
	new->m_client = item->m_client;
	new->m_add_change = item->m_add_change;
}
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i)
		return &i->h;
	else
		return NULL;
}

static void ip_map_request(struct cache_detail *cd,
			   struct cache_head *h,
			   char **bpp, int *blen)
{
	char text_addr[20];
	struct ip_map *im = container_of(h, struct ip_map, h);
	__be32 addr = im->m_addr.s_addr;

	snprintf(text_addr, 20, "%u.%u.%u.%u",
		 ntohl(addr) >> 24 & 0xff,
		 ntohl(addr) >> 16 & 0xff,
		 ntohl(addr) >>  8 & 0xff,
		 ntohl(addr) >>  0 & 0xff);

	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	(*bpp)[-1] = '\n';
}

static struct ip_map *ip_map_lookup(char *class, struct in_addr addr);
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
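
/*
 * Parse a downcall line written by user space (typically rpc.mountd)
 * into the auth.unix.ip channel file.  The expected format is
 *
 *	class ipaddress expiry [domainname]
 *
 * e.g. (illustrative values only) "nfsd 192.168.1.1 1200000000 client1".
 * An empty domainname makes the entry NEGATIVE.
 */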
static int ip_map_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* class ipaddress [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	int b1,b2,b3,b4;
	char c;
	char class[8];
	struct in_addr addr;
	int err;

	struct ip_map *ipmp;
	struct auth_domain *dom;
	time_t expiry;

	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
		return -EINVAL;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	addr.s_addr =
		htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);

	ipmp = ip_map_lookup(class, addr);
	if (ipmp) {
		err = ip_map_update(ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}

static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	addr = im->m_addr;

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	seq_printf(m, "%s %d.%d.%d.%d %s\n",
		   im->m_class,
		   ntohl(addr.s_addr) >> 24 & 0xff,
		   ntohl(addr.s_addr) >> 16 & 0xff,
		   ntohl(addr.s_addr) >>  8 & 0xff,
		   ntohl(addr.s_addr) >>  0 & 0xff,
		   dom
		   );
	return 0;
}

struct cache_detail ip_map_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= IP_HASHMAX,
	.hash_table	= ip_table,
	.name		= "auth.unix.ip",
	.cache_put	= ip_map_put,
	.cache_request	= ip_map_request,
	.cache_parse	= ip_map_parse,
	.cache_show	= ip_map_show,
	.match		= ip_map_match,
	.init		= ip_map_init,
	.update		= update,
	.alloc		= ip_map_alloc,
};

static struct ip_map *ip_map_lookup(char *class, struct in_addr addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ip.m_addr = addr;
	ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
				 hash_str(class, IP_HASHBITS) ^
				 hash_ip(addr.s_addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}

static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	else {
		ip.m_add_change = udom->addr_changes;
		/* if this is from the legacy set_client system call,
		 * we need m_add_change to be one higher
		 */
		if (expiry == NEVER)
			ip.m_add_change++;
	}
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(&ip_map_cache,
				 &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip(ipm->m_addr.s_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, &ip_map_cache);
	return 0;
}
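
/*
 * Pin a permanent "nfsd" mapping from @addr to @dom, i.e. an entry
 * with NEVER as its expiry, as used by the legacy set_client path
 * (see the comment in ip_map_update() above).
 */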
int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
{
	struct unix_domain *udom;
	struct ip_map *ipmp;

	if (dom->flavour != &svcauth_unix)
		return -EINVAL;
	udom = container_of(dom, struct unix_domain, h);
	ipmp = ip_map_lookup("nfsd", addr);

	if (ipmp)
		return ip_map_update(ipmp, udom, NEVER);
	else
		return -ENOMEM;
}
EXPORT_SYMBOL(auth_unix_add_addr);

int auth_unix_forget_old(struct auth_domain *dom)
{
	struct unix_domain *udom;

	if (dom->flavour != &svcauth_unix)
		return -EINVAL;
	udom = container_of(dom, struct unix_domain, h);
	udom->addr_changes++;
	return 0;
}
EXPORT_SYMBOL(auth_unix_forget_old);
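
/*
 * Map an IP address to its "nfsd" auth_domain, taking a reference on
 * it.  If the domain has called auth_unix_forget_old() since this
 * ip_map entry was created (addr_changes has moved past m_add_change),
 * the entry is flagged NEGATIVE and NULL is returned instead.
 */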
struct auth_domain *auth_unix_lookup(struct in_addr addr)
{
	struct ip_map *ipm;
	struct auth_domain *rv;

	ipm = ip_map_lookup("nfsd", addr);

	if (!ipm)
		return NULL;
	if (cache_check(&ip_map_cache, &ipm->h, NULL))
		return NULL;

	if ((ipm->m_client->addr_changes - ipm->m_add_change) > 0) {
		if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
			auth_domain_put(&ipm->m_client->h);
		rv = NULL;
	} else {
		rv = &ipm->m_client->h;
		kref_get(&rv->ref);
	}
	cache_put(&ipm->h, &ip_map_cache);
	return rv;
}
EXPORT_SYMBOL(auth_unix_lookup);

void svcauth_unix_purge(void)
{
	cache_purge(&ip_map_cache);
}
EXPORT_SYMBOL(svcauth_unix_purge);
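
/*
 * Each transport may cache one ip_map reference in xpt_auth_cache so
 * the per-request lookup can usually be skipped.  The two helpers
 * below get/put that cached entry under xpt_lock, dropping it when
 * the cached cache_head has since been invalidated.
 */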
static inline struct ip_map *
ip_map_cached_get(struct svc_rqst *rqstp)
{
	struct ip_map *ipm = NULL;
	struct svc_xprt *xprt = rqstp->rq_xprt;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			if (!cache_valid(&ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				cache_put(&ipm->h, &ip_map_cache);
				return NULL;
			}
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}

static inline void
ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm)
		cache_put(&ipm->h, &ip_map_cache);
}

void
svcauth_unix_info_release(void *info)
{
	struct ip_map *ipm = info;
	cache_put(&ipm->h, &ip_map_cache);
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of 16
 */
#define	GID_HASHBITS	8
#define	GID_HASHMAX	(1<<GID_HASHBITS)
#define	GID_HASHMASK	(GID_HASHMAX - 1)

struct unix_gid {
	struct cache_head	h;
	uid_t			uid;
	struct group_info	*gi;
};
static struct cache_head	*gid_table[GID_HASHMAX];

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree(ug);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return orig->uid == new->uid;
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}

static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", ug->uid);
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}

static struct unix_gid *unix_gid_lookup(uid_t uid);
extern struct cache_detail unix_gid_cache;
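
/*
 * Parse a downcall line written into the auth.unix.gid channel.  As
 * noted below, the format is "uid expiry Ngid gid0 ... gidN-1",
 * e.g. (illustrative values only) "1000 1200000000 2 100 1001".
 */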
static int unix_gid_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int uid;
	int gids;
	int rv;
	int i;
	int err;
	time_t expiry;
	struct unix_gid ug, *ugp;

	if (mlen <= 0 || mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &uid);
	if (rv)
		return -EINVAL;
	ug.uid = uid;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		GROUP_AT(ug.gi, i) = gid;
	}

	ugp = unix_gid_lookup(uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(&unix_gid_cache,
					 &ug.h, &ugp->h,
					 hash_long(uid, GID_HASHBITS));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, &unix_gid_cache);
		}
	} else
		err = -ENOMEM;
out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}

static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%d %d:", ug->uid, glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", GROUP_AT(ug->gi, i));
	seq_printf(m, "\n");
	return 0;
}

struct cache_detail unix_gid_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= GID_HASHMAX,
	.hash_table	= gid_table,
	.name		= "auth.unix.gid",
	.cache_put	= unix_gid_put,
	.cache_request	= unix_gid_request,
	.cache_parse	= unix_gid_parse,
	.cache_show	= unix_gid_show,
	.match		= unix_gid_match,
	.init		= unix_gid_init,
	.update		= unix_gid_update,
	.alloc		= unix_gid_alloc,
};

static struct unix_gid *unix_gid_lookup(uid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h,
				 hash_long(uid, GID_HASHBITS));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}
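
/*
 * Look up the supplementary group list for @uid.  On a cache hit *gip
 * gets a referenced group_info; on a negative entry *gip is set to
 * NULL so the caller falls back to the gids sent on the wire; -EAGAIN
 * means the entry is missing or an upcall is pending, so the request
 * should be dropped and retried by the client.
 */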
static int unix_gid_find(uid_t uid, struct group_info **gip,
			 struct svc_rqst *rqstp)
{
	struct unix_gid *ug = unix_gid_lookup(uid);
	if (!ug)
		return -EAGAIN;
	switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
	case -ENOENT:
		*gip = NULL;
		return 0;
	case 0:
		*gip = ug->gi;
		get_group_info(*gip);
		return 0;
	default:
		return -EAGAIN;
	}
}
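
/*
 * Map the client's IP address to an auth_domain and attach it to the
 * request.  NULL procedure calls (rq_proc == 0) are always allowed.
 * A pending cache upcall drops the request (SVC_DROP); a negative or
 * missing entry denies it (SVC_DENIED).
 */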
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin = svc_addr_in(rqstp);
	struct ip_map *ipm;

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		return SVC_OK;

	ipm = ip_map_cached_get(rqstp);
	if (ipm == NULL)
		ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
				    sin->sin_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
		default:
			BUG();
		case -EAGAIN:
		case -ETIMEDOUT:
			return SVC_DROP;
		case -ENOENT:
			return SVC_DENIED;
		case 0:
			rqstp->rq_client = &ipm->m_client->h;
			kref_get(&rqstp->rq_client->ref);
			ip_map_cached_put(rqstp, ipm);
			break;
	}
	return SVC_OK;
}

EXPORT_SYMBOL(svcauth_unix_set_client);
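
/*
 * AUTH_NULL: the credential and verifier bodies must both be empty.
 * The request is given the anonymous uid/gid (-1 here, to be mapped
 * to nobody later) and an empty supplementary group list.
 */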
static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;

	cred->cr_group_info = NULL;
	rqstp->rq_client = NULL;

	if (argv->iov_len < 3*4)
		return SVC_GARBAGE;

	if (svc_getu32(argv) != 0) {
		dprintk("svc: bad null cred\n");
		*authp = rpc_autherr_badcred;
		return SVC_DENIED;
	}
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		dprintk("svc: bad null verf\n");
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = (uid_t) -1;
	cred->cr_gid = (gid_t) -1;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_DROP; /* kmalloc failure - client must retry */

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}

struct auth_ops svcauth_null = {
	.name		= "null",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,
	.accept 	= svcauth_null_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};

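/*
 * AUTH_UNIX (AUTH_SYS): the credential carries a stamp, the machine
 * name (skipped), uid, gid and up to 16 supplementary gids.  If the
 * auth.unix.gid cache has an entry for this uid it overrides the
 * on-the-wire gid list; otherwise the wire gids are used as sent.
 */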
static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;
	u32		slen, i;
	int		len   = argv->iov_len;

	cred->cr_group_info = NULL;
	rqstp->rq_client = NULL;

	if ((len -= 3*4) < 0)
		return SVC_GARBAGE;

	svc_getu32(argv);			/* length */
	svc_getu32(argv);			/* time stamp */
	slen = XDR_QUADLEN(svc_getnl(argv));	/* machname length */
	if (slen > 64 || (len -= (slen + 3)*4) < 0)
		goto badcred;
	argv->iov_base = (void*)((__be32*)argv->iov_base + slen);	/* skip machname */
	argv->iov_len -= slen*4;

	cred->cr_uid = svc_getnl(argv);		/* uid */
	cred->cr_gid = svc_getnl(argv);		/* gid */
	slen = svc_getnl(argv);			/* gids length */
	if (slen > 16 || (len -= (slen + 2)*4) < 0)
		goto badcred;
	if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp)
	    == -EAGAIN)
		return SVC_DROP;
	if (cred->cr_group_info == NULL) {
		cred->cr_group_info = groups_alloc(slen);
		if (cred->cr_group_info == NULL)
			return SVC_DROP;
		for (i = 0; i < slen; i++)
			GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
	} else {
		for (i = 0; i < slen ; i++)
			svc_getnl(argv);
	}
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	*authp = rpc_autherr_badcred;
	return SVC_DENIED;
}

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}

struct auth_ops svcauth_unix = {
	.name		= "unix",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_UNIX,
	.accept 	= svcauth_unix_accept,
	.release	= svcauth_unix_release,
	.domain_release	= svcauth_unix_domain_release,
	.set_client	= svcauth_unix_set_client,
};