/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
#include <linux/hashtable.h>

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#define GSS_KEY_EXPIRE_TIMEO 240
static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
static DEFINE_SPINLOCK(gss_auth_hash_lock);
struct gss_pipe {
	struct rpc_pipe_dir_object pdo;
	struct rpc_pipe *pipe;
	struct rpc_clnt *clnt;
	const char *name;
	struct kref kref;
};

struct gss_auth {
	struct kref kref;
	struct hlist_node hash;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct net *net;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct gss_pipe *gss_pipe[2];
	const char *target_name;
};
/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
static void gss_put_auth(struct gss_auth *gss_auth);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_atomic();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (ctx)
		gss_get_ctx(ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count, 1);
	}
	return ctx;
}
#define GSSD_MIN_TIMEOUT (60 * 60)

static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}

	/* is there any trailing data? */
	if (q == end) {
		p = q;
		goto done;
	}

	/* pull in acceptor name (if there is one) */
	p = simple_get_netobj(q, end, &ctx->gc_acceptor);
	if (IS_ERR(p))
		goto err;
done:
	dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u acceptor %.*s\n",
		__func__, ctx->gc_expiry, now, timeout, ctx->gc_acceptor.len,
		ctx->gc_acceptor.data);
	return p;
err:
	dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
	return p;
}
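/*
 * For reference, the downcall buffer parsed above is laid out (as written by
 * gssd) as: lifetime in seconds, sequence window size (a value of 0 signals an
 * error code instead), the opaque wire context as a length-prefixed netobj,
 * the exported security context blob, and optionally a length-prefixed
 * acceptor name.  This summary is inferred from the parsing code above, not
 * taken from a separate specification.
 */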
#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	kuid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

static int get_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version >= 0) {
		atomic_inc(&sn->pipe_users);
		ret = sn->pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}
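/*
 * Illustrative note on the pipe_version state (derived from the code here and
 * from gss_pipe_open_v0/v1 below): sn->pipe_version is -1 while no gssd has
 * any upcall pipe open, and becomes 0 (legacy binary upcall) or 1 (text-based
 * upcall) when the first pipe is opened; it drops back to -1 when the last
 * user goes away in put_pipe_version().
 */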
static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = gss_msg->auth->net;
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	gss_put_auth(gss_msg->auth);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: %s found msg %p\n", __func__, pos);
		return pos;
	}
	dprintk("RPC: %s found nothing\n", __func__);
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}

static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}
static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);

	memcpy(gss_msg->databuf, &uid, sizeof(uid));
	gss_msg->msg.data = gss_msg->databuf;
	gss_msg->msg.len = sizeof(uid);

	BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
}
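/*
 * Note: the legacy (v0) upcall message built above is nothing more than the
 * raw uid_t copied into the pipe buffer; older gssd implementations read that
 * single integer and look up everything else themselves.
 */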
static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				const char *service_name,
				const char *target_name)
{
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	size_t buflen = sizeof(gss_msg->databuf);
	int len;

	len = scnprintf(p, buflen, "mech=%s uid=%d ", mech->gm_name,
			from_kuid(&init_user_ns, gss_msg->uid));
	buflen -= len;
	p += len;
	gss_msg->msg.len = len;
	if (target_name) {
		len = scnprintf(p, buflen, "target=%s ", target_name);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	if (service_name != NULL) {
		len = scnprintf(p, buflen, "service=%s ", service_name);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	if (mech->gm_upcall_enctypes) {
		len = scnprintf(p, buflen, "enctypes=%s ",
				mech->gm_upcall_enctypes);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	len = scnprintf(p, buflen, "\n");
	if (len == 0)
		return -ENOMEM;
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	return 0;
}
static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth,
		kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;
	int err = -ENOMEM;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		goto err;
	vers = get_pipe_version(gss_auth->net);
	err = vers;
	if (err < 0)
		goto err_free_msg;
	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	if (vers == 0)
		gss_encode_v0_msg(gss_msg);
	else {
		err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
		if (err)
			goto err_put_pipe_version;
	}
	kref_get(&gss_auth->kref);
	return gss_msg;
err_put_pipe_version:
	put_pipe_version(gss_auth->net);
err_free_msg:
	kfree(gss_msg);
err:
	return ERR_PTR(err);
}

static struct gss_upcall_msg *
gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

static void warn_gssd(void)
{
	dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
}

static int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	dprintk("RPC: %5u %s for uid %u\n",
		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		err = -EAGAIN;
		goto out;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u %s for uid %u result %d\n",
		task->tk_pid, __func__,
		from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct net *net = gss_auth->net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err;

	dprintk("RPC: %s for uid %u\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
	err = 0;
	/* if gssd is down, just skip upcalling altogether */
	if (!gssd_running(net)) {
		warn_gssd();
		return -EACCES;
	}
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				sn->pipe_version >= 0, 15 * HZ);
		if (sn->pipe_version < 0) {
			warn_gssd();
			err = -EACCES;
		}
		if (err < 0)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0)
			break;
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %s for uid %u result %d\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}
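/*
 * Reading note (not part of the original comments): gss_refresh_upcall()
 * above is the non-blocking path used by rpciod tasks, which sleep on an RPC
 * wait queue until gssd answers, while gss_create_upcall() here is the
 * blocking path called from gss_cred_init() in process context.
 */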
#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
	struct gss_cl_ctx *ctx;
	uid_t id;
	kuid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &id, sizeof(id));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	uid = make_kuid(&init_user_ns, id);
	if (!uid_valid(uid)) {
		err = -EINVAL;
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = __gss_find_upcall(pipe, uid);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from "
				"gss_fill_context: %zd\n", __func__, err);
			gss_msg->msg.errno = -EIO;
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: %s returning %zd\n", __func__, err);
	return err;
}
static int gss_pipe_open(struct inode *inode, int new_version)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		sn->pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (sn->pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&sn->pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;
}

static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}

static void
gss_pipe_release(struct inode *inode)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {

		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version(net);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: %s releasing msg %p\n",
			__func__, gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
}
static void gss_pipe_dentry_destroy(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *gss_pipe = pdo->pdo_data;
	struct rpc_pipe *pipe = gss_pipe->pipe;

	if (pipe->dentry != NULL) {
		rpc_unlink(pipe->dentry);
		pipe->dentry = NULL;
	}
}

static int gss_pipe_dentry_create(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *p = pdo->pdo_data;
	struct dentry *dentry;

	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	p->pipe->dentry = dentry;
	return 0;
}

static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
	.create		= gss_pipe_dentry_create,
	.destroy	= gss_pipe_dentry_destroy,
};

static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct gss_pipe *p;
	int err = -ENOMEM;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		goto err;
	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(p->pipe)) {
		err = PTR_ERR(p->pipe);
		goto err_free_gss_pipe;
	}
	p->name = name;
	p->clnt = clnt;
	kref_init(&p->kref);
	rpc_init_pipe_dir_object(&p->pdo,
			&gss_pipe_dir_object_ops,
			p);
	return p;
err_free_gss_pipe:
	kfree(p);
err:
	return ERR_PTR(err);
}

struct gss_alloc_pdo {
	struct rpc_clnt *clnt;
	const char *name;
	const struct rpc_pipe_ops *upcall_ops;
};

static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
		return 0;
	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
	if (strcmp(gss_pipe->name, args->name) != 0)
		return 0;
	if (!kref_get_unless_zero(&gss_pipe->kref))
		return 0;
	return 1;
}

static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
	if (!IS_ERR(gss_pipe))
		return &gss_pipe->pdo;
	return NULL;
}

static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct net *net = rpc_net_ns(clnt);
	struct rpc_pipe_dir_object *pdo;
	struct gss_alloc_pdo args = {
		.clnt = clnt,
		.name = name,
		.upcall_ops = upcall_ops,
	};

	pdo = rpc_find_or_alloc_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			gss_pipe_match_pdo,
			gss_pipe_alloc_pdo,
			&args);
	if (pdo != NULL)
		return container_of(pdo, struct gss_pipe, pdo);
	return ERR_PTR(-ENOMEM);
}

static void __gss_pipe_free(struct gss_pipe *p)
{
	struct rpc_clnt *clnt = p->clnt;
	struct net *net = rpc_net_ns(clnt);

	rpc_remove_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			&p->pdo);
	rpc_destroy_pipe_data(p->pipe);
	kfree(p);
}

static void __gss_pipe_release(struct kref *kref)
{
	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);

	__gss_pipe_free(p);
}

static void gss_pipe_free(struct gss_pipe *p)
{
	if (p != NULL)
		kref_put(&p->kref, __gss_pipe_release);
}
/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct gss_auth *
gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	rpc_authflavor_t flavor = args->pseudoflavor;
	struct gss_auth *gss_auth;
	struct gss_pipe *gss_pipe;
	struct rpc_auth *auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	INIT_HLIST_NODE(&gss_auth->hash);
	gss_auth->target_name = NULL;
	if (args->target_name) {
		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
		if (gss_auth->target_name == NULL)
			goto err_free;
	}
	gss_auth->client = clnt;
	gss_auth->net = get_net(rpc_net_ns(clnt));
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
		goto err_put_net;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	if (!gssd_running(gss_auth->net))
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_put_mech;
	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_credcache;
	}
	gss_auth->gss_pipe[1] = gss_pipe;

	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
			&gss_upcall_ops_v0);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_pipe_1;
	}
	gss_auth->gss_pipe[0] = gss_pipe;

	return gss_auth;
err_destroy_pipe_1:
	gss_pipe_free(gss_auth->gss_pipe[1]);
err_destroy_credcache:
	rpcauth_destroy_credcache(auth);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_put_net:
	put_net(gss_auth->net);
err_free:
	kfree(gss_auth->target_name);
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}
static void
gss_free(struct gss_auth *gss_auth)
{
	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_mech_put(gss_auth->mech);
	put_net(gss_auth->net);
	kfree(gss_auth->target_name);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

static void
gss_put_auth(struct gss_auth *gss_auth)
{
	kref_put(&gss_auth->kref, gss_free_callback);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth = container_of(auth,
			struct gss_auth, rpc_auth);

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
			auth, auth->au_flavor);

	if (hash_hashed(&gss_auth->hash)) {
		spin_lock(&gss_auth_hash_lock);
		hash_del(&gss_auth->hash);
		spin_unlock(&gss_auth_hash_lock);
	}

	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_auth->gss_pipe[0] = NULL;
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_auth->gss_pipe[1] = NULL;
	rpcauth_destroy_credcache(auth);

	gss_put_auth(gss_auth);
}
/*
 * Auths may be shared between rpc clients that were cloned from a
 * common client with the same xprt, if they also share the flavor and
 * target_name.
 *
 * The auth is looked up from the oldest parent sharing the same
 * cl_xprt, and the auth itself references only that common parent
 * (which is guaranteed to last as long as any of its descendants).
 */
static struct gss_auth *
gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
		struct rpc_clnt *clnt,
		struct gss_auth *new)
{
	struct gss_auth *gss_auth;
	unsigned long hashval = (unsigned long)clnt;

	spin_lock(&gss_auth_hash_lock);
	hash_for_each_possible(gss_auth_hash_table,
				gss_auth,
				hash,
				hashval) {
		if (gss_auth->client != clnt)
			continue;
		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
			continue;
		if (gss_auth->target_name != args->target_name) {
			if (gss_auth->target_name == NULL)
				continue;
			if (args->target_name == NULL)
				continue;
			if (strcmp(gss_auth->target_name, args->target_name))
				continue;
		}
		if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count))
			continue;
		goto out;
	}
	if (new)
		hash_add(gss_auth_hash_table, &new->hash, hashval);
	gss_auth = new;
out:
	spin_unlock(&gss_auth_hash_lock);
	return gss_auth;
}

static struct gss_auth *
gss_create_hashed(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct gss_auth *new;

	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
	if (gss_auth != NULL)
		goto out;
	new = gss_create_new(args, clnt);
	if (IS_ERR(new))
		return new;
	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
	if (gss_auth != new)
		gss_destroy(&new->rpc_auth);
out:
	return gss_auth;
}

static struct rpc_auth *
gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct rpc_xprt *xprt = rcu_access_pointer(clnt->cl_xprt);

	while (clnt != clnt->cl_parent) {
		struct rpc_clnt *parent = clnt->cl_parent;
		/* Find the original parent for this transport */
		if (rcu_access_pointer(parent->cl_xprt) != xprt)
			break;
		clnt = parent;
	}

	gss_auth = gss_create_hashed(args, clnt);
	if (IS_ERR(gss_auth))
		return ERR_CAST(gss_auth);
	return &gss_auth->rpc_auth;
}
/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
	struct rpc_task *task;

	if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: %s\n", __func__);

	gss_delete_sec_context(&ctx->gc_gss_ctx);
	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx->gc_acceptor.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: %s cred=%p\n", __func__, gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}

static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);

	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	gss_put_auth(gss_auth);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{
	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}
/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred	*cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: %s for uid %d, flavor %d\n",
		__func__, from_kuid(&init_user_ns, acred->uid),
		auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: %s failed with error %d\n", __func__, err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}
static char *
gss_stringify_acceptor(struct rpc_cred *cred)
{
	char *string = NULL;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	unsigned int len;
	struct xdr_netobj *acceptor;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (!ctx)
		goto out;

	len = ctx->gc_acceptor.len;
	rcu_read_unlock();

	/* no point if there's no string */
	if (!len)
		return NULL;
realloc:
	string = kmalloc(len + 1, GFP_KERNEL);
	if (!string)
		return NULL;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);

	/* did the ctx disappear or was it replaced by one with no acceptor? */
	if (!ctx || !ctx->gc_acceptor.len) {
		kfree(string);
		string = NULL;
		goto out;
	}

	acceptor = &ctx->gc_acceptor;

	/*
	 * Did we find a new acceptor that's longer than the original? Allocate
	 * a longer buffer and try again.
	 */
	if (len < acceptor->len) {
		len = acceptor->len;
		rcu_read_unlock();
		kfree(string);
		goto realloc;
	}

	memcpy(string, acceptor->data, acceptor->len);
	string[acceptor->len] = '\0';
out:
	rcu_read_unlock();
	return string;
}
/*
 * Returns -EACCES if GSS context is NULL or will expire within the
 * timeout (milliseconds)
 */
static int
gss_key_timeout(struct rpc_cred *rc)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	unsigned long now = jiffies;
	unsigned long expire;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (ctx)
		expire = ctx->gc_expiry - (gss_key_expire_timeo * HZ);
	rcu_read_unlock();
	if (!ctx || time_after(now, expire))
		return -EACCES;
	return 0;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	int ret;

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
		goto check_expire;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	ret = uid_eq(rc->cr_uid, acred->uid);

check_expire:
	if (ret == 0)
		return ret;

	/* Notify acred users of GSS context expiration timeout */
	if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags) &&
	    (gss_key_timeout(rc) != 0)) {
		/* test will now be done from generic cred */
		test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
		/* tell NFS layer that key will expire soon */
		set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
	}
	return ret;
}
/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32		*cred_len;
	u32		maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec	iov;
	struct xdr_buf	verf_buf;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor*/
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}
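/*
 * Rough sketch of what gss_marshal() emits (inferred from the code above, not
 * an authoritative wire description): the credential is RPC_AUTH_GSS, a body
 * length, then { gss version, gc_proc, sequence number, service, wire
 * context }; the verifier is RPC_AUTH_GSS followed by a MIC computed over the
 * request from the xid through the end of the credential.
 */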
static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.principal = gss_cred->gc_principal,
		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_rqstp->rq_cred = new;
	put_rpccred(oldcred);
	return 0;
}

static int gss_cred_is_negative_entry(struct rpc_cred *cred)
{
	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
		unsigned long now = jiffies;
		unsigned long begin, expire;
		struct gss_cred *gss_cred;

		gss_cred = container_of(cred, struct gss_cred, gc_base);
		begin = gss_cred->gc_upcall_timestamp;
		expire = begin + gss_expired_cred_retry_delay * HZ;

		if (time_in_range_open(now, begin, expire))
			return 1;
	}
	return 0;
}

/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	int ret = 0;

	if (gss_cred_is_negative_entry(cred))
		return -EKEYEXPIRED;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
	    !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_rqstp->rq_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return 0;
}
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32		seq;
	struct kvec	iov;
	struct xdr_buf	verf_buf;
	struct xdr_netobj mic;
	u32		flav, len;
	u32		maj_stat;
	__be32		*ret = ERR_PTR(-EIO);

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	ret = ERR_PTR(-EACCES);
	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
			task->tk_pid, __func__, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
		task->tk_pid, __func__);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
		PTR_ERR(ret));
	return ret;
}

static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
				__be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
	encode(rqstp, &xdr, obj);
}
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdreproc_t encode, struct rpc_rqst *rqstp,
		__be32 *p, void *obj)
{
	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf	integ_buf;
	__be32		*integ_len = NULL;
	struct xdr_netobj mic;
	u32		offset;
	__be32		*q;
	struct kvec	*iov;
	u32		maj_stat = 0;
	int		status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}
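/*
 * Layout sketch for the integrity case, read off the code above: the request
 * body becomes <length of seqno+args> <seqno> <xdr-encoded args>, followed by
 * an opaque MIC over that region appended in the head or tail of rq_snd_buf.
 */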
static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i = 0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i = 0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	rqstp->rq_enc_pages_num = i;
	priv_release_snd_buf(rqstp);
out:
	return -EAGAIN;
}
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdreproc_t encode, struct rpc_rqst *rqstp,
		__be32 *p, void *obj)
{
	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
	u32		offset;
	u32		maj_stat;
	int		status;
	__be32		*opaque_len;
	struct page	**inpages;
	int		first;
	int		pad;
	struct kvec	*iov;
	char		*tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/*
	 * Give the tail its own page, in case we need extra space in the
	 * head when wrapping:
	 *
	 * call_allocate() allocates twice the slack space required
	 * by the authentication flavor to rq_callsize.
	 * For GSS, slack is GSS_CRED_SLACK.
	 */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* slack space should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}
static int
gss_wrap_req(struct rpc_task *task,
	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int		status = -EIO;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		goto out;
	}
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
		break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
	return status;
}
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf	*rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf	integ_buf;
	struct xdr_netobj mic;
	u32		data_offset, mic_offset;
	u32		integ_len;
	u32		maj_stat;
	int		status = -EIO;

	integ_len = ntohl(*(*p)++);
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf	*rcv_buf = &rqstp->rq_rcv_buf;
	u32		offset;
	u32		opaque_len;
	u32		maj_stat;
	int		status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}

static int
gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
		      __be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	return decode(rqstp, &xdr, obj);
}
static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32		*savedp = p;
	struct kvec	*head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int		savedlen = head->iov_len;
	int		status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n",
			task->tk_pid, __func__, status);
	return status;
}
static const struct rpc_authops authgss_ops = {
	.owner			= THIS_MODULE,
	.au_flavor		= RPC_AUTH_GSS,
	.au_name		= "RPCSEC_GSS",
	.create			= gss_create,
	.destroy		= gss_destroy,
	.lookup_cred		= gss_lookup_cred,
	.crcreate		= gss_create_cred,
	.list_pseudoflavors	= gss_mech_list_pseudoflavors,
	.info2flavor		= gss_mech_info2flavor,
	.flavor2info		= gss_mech_flavor2info,
};

static const struct rpc_credops gss_credops = {
	.cr_name		= "AUTH_GSS",
	.crdestroy		= gss_destroy_cred,
	.cr_init		= gss_cred_init,
	.crbind			= rpcauth_generic_bind_cred,
	.crmatch		= gss_match,
	.crmarshal		= gss_marshal,
	.crrefresh		= gss_refresh,
	.crvalidate		= gss_validate,
	.crwrap_req		= gss_wrap_req,
	.crunwrap_resp		= gss_unwrap_resp,
	.crkey_timeout		= gss_key_timeout,
	.crstringify_acceptor	= gss_stringify_acceptor,
};

static const struct rpc_credops gss_nullops = {
	.cr_name		= "AUTH_GSS",
	.crdestroy		= gss_destroy_nullcred,
	.crbind			= rpcauth_generic_bind_cred,
	.crmatch		= gss_match,
	.crmarshal		= gss_marshal,
	.crrefresh		= gss_refresh_null,
	.crvalidate		= gss_validate,
	.crwrap_req		= gss_wrap_req,
	.crunwrap_resp		= gss_unwrap_resp,
	.crstringify_acceptor	= gss_stringify_acceptor,
};

static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v0,
	.release_pipe	= gss_pipe_release,
};

static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v1,
	.release_pipe	= gss_pipe_release,
};
static __net_init int rpcsec_gss_init_net(struct net *net)
{
	return gss_svc_init_net(net);
}

static __net_exit void rpcsec_gss_exit_net(struct net *net)
{
	gss_svc_shutdown_net(net);
}

static struct pernet_operations rpcsec_gss_net_ops = {
	.init = rpcsec_gss_init_net,
	.exit = rpcsec_gss_exit_net,
};
/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	err = register_pernet_subsys(&rpcsec_gss_net_ops);
	if (err)
		goto out_svc_exit;
	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
	return 0;
out_svc_exit:
	gss_svc_shutdown();
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	unregister_pernet_subsys(&rpcsec_gss_net_ops);
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
	rcu_barrier();	/* Wait for completion of call_rcu()'s */
}
MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");

module_param_named(key_expire_timeo,
		   gss_key_expire_timeo,
		   uint, 0644);
MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
		"credential key's lifetime where the NFS layer cleans up "
		"prior to key expiration");

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)
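/*
 * Usage note (an assumption, not part of the original file): with this code
 * built as the auth_rpcgss module, the two parameters above are expected to
 * appear under /sys/module/auth_rpcgss/parameters/expired_cred_retry_delay
 * and .../key_expire_timeo, writable at runtime given their 0644 mode.
 */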