/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song <dugsong@monkey.org>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;
# define RPCDBG_FACILITY	RPCDBG_AUTH
#define GSS_CRED_SLACK		1024
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100
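/*
 * Both values are byte counts; gss_create() below converts them to
 * 32-bit XDR words (">> 2") before storing them in au_cslack and
 * au_rslack.
 */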
struct gss_auth {
	struct kref kref;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct dentry *dentry[2];
};
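/*
 * Rough sketch of the two upcall formats (see gss_encode_v0_msg() and
 * gss_encode_v1_msg() below): on the legacy per-mechanism pipe gssd
 * reads the raw uid_t, while on the "gssd" pipe it reads a text request
 * such as "mech=krb5 uid=500 " plus optional "target=" and "service="
 * fields.  Which format is used is decided by whichever pipe gssd opens
 * first (see gss_pipe_open()).
 */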
/* pipe_version >= 0 if and only if someone has a pipe open. */
static int pipe_version = -1;
static atomic_t pipe_users = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
static void gss_free_ctx(struct gss_cl_ctx *);
static struct rpc_pipe_ops gss_upcall_ops_v0;
static struct rpc_pipe_ops gss_upcall_ops_v1;
static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}
static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}
/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the inode->i_lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}
static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}
static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}
static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}
static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count,1);
	}
	return ctx;
}
#define GSSD_MIN_TIMEOUT (60 * 60)

static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	u32 window_size;
	int ret;

	/* First unsigned int gives the lifetime (in seconds) of the cred */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
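	/*
	 * The 3/4 factor builds in a safety margin: a gssd-supplied
	 * lifetime of 3600 seconds, for example, yields an expiry about
	 * 2700 seconds (45 minutes) from now, so the client refreshes
	 * the context well before the server-side credential lapses.
	 */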
	/* Sequence number window. Determines the maximum number of simultaneous requests */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/* in which case, p points to an error code which we ignore */
		p = ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	return q;
err:
	dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p));
	return p;
}
#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	uid_t	uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_inode *inode;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};
static int get_pipe_version(void)
{
	int ret;

	spin_lock(&pipe_version_lock);
	if (pipe_version >= 0) {
		atomic_inc(&pipe_users);
		ret = pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}
static void put_pipe_version(void)
{
	if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
		pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}
static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version();
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}
static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &rpci->in_downcall, list) {
		if (pos->uid != uid)
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: gss_find_upcall found msg %p\n", pos);
		return pos;
	}
	dprintk("RPC: gss_find_upcall found nothing\n");
	return NULL;
}
/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
{
	struct rpc_inode *rpci = gss_msg->inode;
	struct inode *inode = &rpci->vfs_inode;
	struct gss_upcall_msg *old;

	spin_lock(&inode->i_lock);
	old = __gss_find_upcall(rpci, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &rpci->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&inode->i_lock);
	return gss_msg;
}
static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}
static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct inode *inode = &gss_msg->inode->vfs_inode;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&inode->i_lock);
}
static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct inode *inode = &gss_msg->inode->vfs_inode;

	spin_lock(&inode->i_lock);
	if (gss_msg->ctx)
		gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
	else
		task->tk_status = gss_msg->msg.errno;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
}
static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	gss_msg->msg.data = &gss_msg->uid;
	gss_msg->msg.len = sizeof(gss_msg->uid);
}
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				struct rpc_clnt *clnt, int machine_cred)
{
	char *p = gss_msg->databuf;
	int len = 0;

	gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
				   gss_msg->auth->mech->gm_name,
				   gss_msg->uid);
	p += gss_msg->msg.len;
	if (clnt->cl_principal) {
		len = sprintf(p, "target=%s ", clnt->cl_principal);
		p += len;
		gss_msg->msg.len += len;
	}
	if (machine_cred) {
		len = sprintf(p, "service=* ");
		p += len;
		gss_msg->msg.len += len;
	} else if (!strcmp(clnt->cl_program->name, "nfs4_cb")) {
		len = sprintf(p, "service=nfs ");
		p += len;
		gss_msg->msg.len += len;
	}
	len = sprintf(p, "\n");
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
}
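/*
 * For instance, a machine credential for uid 0 would come out roughly
 * as "mech=krb5 uid=0 service=* \n" (values here are purely
 * illustrative); gssd parses these space-separated fields from the pipe.
 */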
static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
				struct rpc_clnt *clnt, int machine_cred)
{
	if (pipe_version == 0)
		gss_encode_v0_msg(gss_msg);
	else /* pipe_version == 1 */
		gss_encode_v1_msg(gss_msg, clnt, machine_cred);
}
static inline struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
		int machine_cred)
{
	struct gss_upcall_msg *gss_msg;
	int vers;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		return ERR_PTR(-ENOMEM);
	vers = get_pipe_version();
	if (vers < 0) {
		kfree(gss_msg);
		return ERR_PTR(vers);
	}
	gss_msg->inode = RPC_I(gss_auth->dentry[vers]->d_inode);
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	gss_encode_msg(gss_msg, clnt, machine_cred);
	return gss_msg;
}
static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	uid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_auth, gss_new);
	if (gss_msg == gss_new) {
		struct inode *inode = &gss_new->inode->vfs_inode;
		int res = rpc_queue_upcall(inode, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}
static void warn_gssd(void)
{
	static unsigned long ratelimit;
	unsigned long now = jiffies;

	if (time_after(now, ratelimit)) {
		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
				"Please check user daemon is running.\n");
		ratelimit = now + 15*HZ;
	}
}
static int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct inode *inode;
	int err = 0;

	dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
			cred->cr_uid);
	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		return 0;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	inode = &gss_msg->inode->vfs_inode;
	spin_lock(&inode->i_lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx != NULL) {
		gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
		gss_cred->gc_upcall = NULL;
		rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	} else if (gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else
		err = gss_msg->msg.errno;
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
			task->tk_pid, cred->cr_uid, err);
	return err;
}
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct inode *inode;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err = 0;

	dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
retry:
	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				pipe_version >= 0, 15*HZ);
		if (err)
			goto out;
		if (pipe_version < 0)
			warn_gssd();
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	inode = &gss_msg->inode->vfs_inode;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&inode->i_lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&inode->i_lock);
		if (signalled()) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&inode->i_lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: gss_create_upcall for uid %u result %d\n",
			cred->cr_uid, err);
	return err;
}
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
		char __user *dst, size_t buflen)
{
	char *data = (char *)msg->data + msg->copied;
	size_t mlen = min(msg->len, buflen);
	unsigned long left;

	left = copy_to_user(dst, data, mlen);
	if (left == mlen) {
		msg->errno = -EFAULT;
		return -EFAULT;
	}

	mlen -= left;
	msg->copied += mlen;
	msg->errno = 0;
	return mlen;
}
#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gss_cl_ctx *ctx;
	uid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &uid, sizeof(uid));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&inode->i_lock);
	gss_msg = __gss_find_upcall(RPC_I(inode), uid);
	if (gss_msg == NULL) {
		spin_unlock(&inode->i_lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&inode->i_lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&inode->i_lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: gss_pipe_downcall returning %Zd\n", err);
	return err;
}
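/*
 * The downcall gssd writes back is, in order: the uid the upcall was
 * for, the context lifetime and sequence window, the opaque wire
 * context handle, and finally the mechanism-specific security context
 * blob handed to gss_import_sec_context() (see gss_fill_context()).
 */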
static int gss_pipe_open(struct inode *inode, int new_version)
{
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;
}
static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}
static void
gss_pipe_release(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct gss_upcall_msg *gss_msg;

	spin_lock(&inode->i_lock);
	while (!list_empty(&rpci->in_downcall)) {

		gss_msg = list_entry(rpci->in_downcall.next,
				struct gss_upcall_msg, list);
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&inode->i_lock);
		gss_release_msg(gss_msg);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);

	put_pipe_version();
}
static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n",
				gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
}
/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct gss_auth *gss_auth;
	struct rpc_auth * auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	gss_auth->client = clnt;
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n",
				__func__, flavor);
		goto err_free;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);
	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_dentry,
					 "gssd",
					 clnt, &gss_upcall_ops_v1,
					 RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->dentry[1])) {
		err = PTR_ERR(gss_auth->dentry[1]);
		goto err_put_mech;
	}

	gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_dentry,
					 gss_auth->mech->gm_name,
					 clnt, &gss_upcall_ops_v0,
					 RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->dentry[0])) {
		err = PTR_ERR(gss_auth->dentry[0]);
		goto err_unlink_pipe_1;
	}
	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_unlink_pipe_0;

	return auth;
err_unlink_pipe_0:
	rpc_unlink(gss_auth->dentry[0]);
err_unlink_pipe_1:
	rpc_unlink(gss_auth->dentry[1]);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_free:
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}
static void
gss_free(struct gss_auth *gss_auth)
{
	rpc_unlink(gss_auth->dentry[1]);
	rpc_unlink(gss_auth->dentry[0]);
	gss_mech_put(gss_auth->mech);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}
static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}
static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
			auth, auth->au_flavor);

	rpcauth_destroy_credcache(auth);

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	kref_put(&gss_auth->kref, gss_free_callback);
}
/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct rpc_task *task;

	if (gss_cred->gc_ctx == NULL ||
	    test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}
/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: gss_free_ctx\n");

	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}
static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}
static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	struct gss_ctx *gc_gss_ctx;

	gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
	rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
	if (gc_gss_ctx)
		gss_delete_sec_context(&gc_gss_ctx);
}
static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: gss_free_cred %p\n", gss_cred);
	kfree(gss_cred);
}
static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}
static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	rcu_assign_pointer(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	kref_put(&gss_auth->kref, gss_free_callback);
}
static void
gss_destroy_cred(struct rpc_cred *cred)
{
	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}
/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}
static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred	*cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
			acred->uid, auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_machine_cred = acred->machine_cred;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: gss_create_cred failed with error %d\n", err);
	return ERR_PTR(err);
}
static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}
static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->machine_cred != gss_cred->gc_machine_cred)
		return 0;
	return (rc->cr_uid == acred->uid);
}
/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
	__be32		*cred_len;
	struct rpc_rqst *req = task->tk_rqstp;
	u32             maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec	iov;
	struct xdr_buf	verf_buf;

	dprintk("RPC: %5u gss_marshal\n", task->tk_pid);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);
1052 * starting with the xid and ending at the end of the credential: */
1053 iov
.iov_base
= xprt_skip_transport_header(task
->tk_xprt
,
1054 req
->rq_snd_buf
.head
[0].iov_base
);
1055 iov
.iov_len
= (u8
*)p
- (u8
*)iov
.iov_base
;
1056 xdr_buf_from_iov(&iov
, &verf_buf
);
1058 /* set verifier flavor*/
1059 *p
++ = htonl(RPC_AUTH_GSS
);
1061 mic
.data
= (u8
*)(p
+ 1);
1062 maj_stat
= gss_get_mic(ctx
->gc_gss_ctx
, &verf_buf
, &mic
);
1063 if (maj_stat
== GSS_S_CONTEXT_EXPIRED
) {
1064 clear_bit(RPCAUTH_CRED_UPTODATE
, &cred
->cr_flags
);
1065 } else if (maj_stat
!= 0) {
1066 printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat
);
1069 p
= xdr_encode_opaque(p
, NULL
, mic
.len
);
static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.machine_cred = gss_cred->gc_machine_cred,
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_msg.rpc_cred = new;
	put_rpccred(oldcred);
	return 0;
}
/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	int ret = 0;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_msg.rpc_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}
/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return -EACCES;
}
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32		seq;
	struct kvec	iov;
	struct xdr_buf	verf_buf;
	struct xdr_netobj mic;
	u32		flav,len;
	u32		maj_stat;

	dprintk("RPC: %5u gss_validate\n", task->tk_pid);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u gss_validate: gss_verify_mic returned "
				"error 0x%08x\n", task->tk_pid, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
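	/* The verifier takes XDR_QUADLEN(len) words for its checksum body
	 * plus two more words for the flavor and length fields. */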
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n",
			task->tk_pid);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid);
	return NULL;
}
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf	integ_buf;
	__be32          *integ_len = NULL;
	struct xdr_netobj mic;
	u32		offset;
	__be32		*q;
	struct kvec	*iov;
	u32             maj_stat = 0;
	int		status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	status = encode(rqstp, p, obj);
	if (status)
		return status;

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;

	return 0;
}
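/*
 * With the integrity service the request body on the wire ends up as: a
 * four-byte length, the sequence number, the XDR-encoded arguments, and
 * then an opaque MIC computed over the sequence number and arguments.
 */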
static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i=0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}
static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
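	/* The extra "+ 1" page lets the tail get its own page, so gss_wrap()
	 * can grow the head without overwriting page data (see
	 * gss_wrap_req_priv() below). */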
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	for (i--; i >= 0; i--) {
		__free_page(rqstp->rq_enc_pages[i]);
	}
out:
	return -EAGAIN;
}
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
	u32		offset;
	u32             maj_stat;
	int		status;
	__be32		*opaque_len;
	struct page	**inpages;
	int		first;
	int		pad;
	struct kvec	*iov;
	char		*tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	status = encode(rqstp, p, obj);
	if (status)
		return status;

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/* Give the tail its own page, in case we need extra space in the
	 * head when wrapping: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* RPC_SLACK_SPACE should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
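	/*
	 * The pad rounds (snd_buf->len - offset) up to a four-byte XDR
	 * boundary; a 5-byte wrapped payload, for example, needs pad = 3.
	 */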
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}
static int
gss_wrap_req(struct rpc_task *task,
	     kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int             status = -EIO;

	dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		status = encode(rqstp, p, obj);
		goto out;
	}
	switch (gss_cred->gc_service) {
		case RPC_GSS_SVC_NONE:
			status = encode(rqstp, p, obj);
			break;
		case RPC_GSS_SVC_INTEGRITY:
			status = gss_wrap_req_integ(cred, ctx, encode,
								rqstp, p, obj);
			break;
		case RPC_GSS_SVC_PRIVACY:
			status = gss_wrap_req_priv(cred, ctx, encode,
					rqstp, p, obj);
			break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status);
	return status;
}
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf	*rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}
static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf  *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}
static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32		*savedp = p;
	struct kvec	*head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int		savedlen = head->iov_len;
	int             status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
		case RPC_GSS_SVC_NONE:
			break;
		case RPC_GSS_SVC_INTEGRITY:
			status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
			if (status)
				goto out;
			break;
		case RPC_GSS_SVC_PRIVACY:
			status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
			if (status)
				goto out;
			break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = decode(rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
			status);
	return status;
}
static const struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
	.au_name	= "RPCSEC_GSS",
	.create		= gss_create,
	.destroy	= gss_destroy,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred
};
static const struct rpc_credops gss_credops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.cr_init	= gss_cred_init,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};
static const struct rpc_credops gss_nullops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_nullcred,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh_null,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};
static struct rpc_pipe_ops gss_upcall_ops_v0 = {
	.upcall		= gss_pipe_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v0,
	.release_pipe	= gss_pipe_release,
};
static struct rpc_pipe_ops gss_upcall_ops_v1 = {
	.upcall		= gss_pipe_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v1,
	.release_pipe	= gss_pipe_release,
};
/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;

	err = gss_svc_init();
	if (err)
		goto out_unregister;

	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
	return 0;
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}
static void __exit exit_rpcsec_gss(void)
{
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
MODULE_LICENSE("GPL");
module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)