// SPDX-License-Identifier: GPL-2.0
/*
 * Neil Brown <neilb@cse.unsw.edu.au>
 * J. Bruce Fields <bfields@umich.edu>
 * Andy Adamson <andros@umich.edu>
 * Dug Song <dugsong@monkey.org>
 *
 * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
 * (GSS-API).
 *
 * RPCSEC_GSS involves three stages:
 *  1/ context creation
 *  2/ data exchange
 *  3/ context destruction
 *
 * Context creation is handled largely by upcalls to user-space.
 *  In particular, GSS_Accept_sec_context is handled by an upcall.
 * Data exchange is handled entirely within the kernel.
 *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
 * Context destruction is handled in-kernel.
 *  GSS_Delete_sec_context is in-kernel.
 *
 * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
 * The context handle and gss_token are used as a key into the rpcsec_init cache.
 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
 * namely major_status, minor_status, context_handle, and reply_token.
 * These are sent back to the client.
 * Sequence window management is handled by the kernel. The window size is currently
 * a compile-time constant.
 *
 * When user-space is happy that a context is established, it places an entry
 * in the rpcsec_context cache. The key for this cache is the context_handle.
 * The content includes:
 *  uid/gidlist - for determining access rights
 *  mechanism specific information, such as a key
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/user_namespace.h>

#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_krb5.h>

#include <trace/events/rpcgss.h>

#include "gss_rpc_upcall.h"
/*
 * Unfortunately there isn't a maximum checksum size exported via the
 * GSS API. Manufacture one based on GSS mechanisms supported by this
 * implementation.
 */
#define GSS_MAX_CKSUMSIZE (GSS_KRB5_TOK_HDR_LEN + GSS_KRB5_MAX_CKSUM_LEN)

/*
 * This value may be increased in the future to accommodate other
 * usage of the scratch buffer.
 */
#define GSS_SCRATCH_SIZE GSS_MAX_CKSUMSIZE
struct gss_svc_data {
        /* decoded gss client cred: */
        struct rpc_gss_wire_cred        clcred;

        u32                             gsd_databody_offset;

        struct rsc                      *rsci;

        /* for temporary results */
        __be32                          gsd_seq_num;
        u8                              gsd_scratch[GSS_SCRATCH_SIZE];
};
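
/*
 * One of these is allocated per request and hung off rqstp->rq_auth_data
 * by svcauth_gss_accept(); gsd_scratch gives gss_get_mic()/gss_verify_mic()
 * a place to store checksums without a per-call allocation.
 */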
/*
 * The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
 * into replies.
 *
 * Key is context handle (\x if empty) and gss_token.
 * Content is major_status, minor_status (integers), context_handle, reply_token.
 */
static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
{
        return a->len == b->len
                && 0 == memcmp(a->data, b->data, a->len);
}
#define RSI_HASHBITS    6
#define RSI_HASHMAX     (1<<RSI_HASHBITS)
struct rsi {
        struct cache_head       h;
        struct xdr_netobj       in_handle, in_token;
        struct xdr_netobj       out_handle, out_token;
        int                     major_status, minor_status;
        struct rcu_head         rcu_head;
};
static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);
static void rsi_free(struct rsi *rsii)
{
        kfree(rsii->in_handle.data);
        kfree(rsii->in_token.data);
        kfree(rsii->out_handle.data);
        kfree(rsii->out_token.data);
}
static void rsi_free_rcu(struct rcu_head *head)
{
        struct rsi *rsii = container_of(head, struct rsi, rcu_head);

        rsi_free(rsii);
        kfree(rsii);
}
static void rsi_put(struct kref *ref)
{
        struct rsi *rsii = container_of(ref, struct rsi, h.ref);

        call_rcu(&rsii->rcu_head, rsi_free_rcu);
}
static inline int rsi_hash(struct rsi *item)
{
        return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
             ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
}
static int rsi_match(struct cache_head *a, struct cache_head *b)
{
        struct rsi *item = container_of(a, struct rsi, h);
        struct rsi *tmp = container_of(b, struct rsi, h);

        return netobj_equal(&item->in_handle, &tmp->in_handle) &&
               netobj_equal(&item->in_token, &tmp->in_token);
}
static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
{
        dst->len = len;
        dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL);
        if (len && !dst->data)
                return -ENOMEM;
        return 0;
}
static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
{
        return dup_to_netobj(dst, src->data, src->len);
}
static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);

        new->out_handle.data = NULL;
        new->out_handle.len = 0;
        new->out_token.data = NULL;
        new->out_token.len = 0;
        new->in_handle.len = item->in_handle.len;
        item->in_handle.len = 0;
        new->in_token.len = item->in_token.len;
        item->in_token.len = 0;
        new->in_handle.data = item->in_handle.data;
        item->in_handle.data = NULL;
        new->in_token.data = item->in_token.data;
        item->in_token.data = NULL;
}
static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
{
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);

        BUG_ON(new->out_handle.data || new->out_token.data);
        new->out_handle.len = item->out_handle.len;
        item->out_handle.len = 0;
        new->out_token.len = item->out_token.len;
        item->out_token.len = 0;
        new->out_handle.data = item->out_handle.data;
        item->out_handle.data = NULL;
        new->out_token.data = item->out_token.data;
        item->out_token.data = NULL;

        new->major_status = item->major_status;
        new->minor_status = item->minor_status;
}
static struct cache_head *rsi_alloc(void)
{
        struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);

        if (rsii)
                return &rsii->h;
        return NULL;
}
static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall_timeout(cd, h);
}
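
/*
 * Format the upcall written to the "auth.rpcsec.init" channel: the client's
 * context handle and GSS token are hex-encoded so that user-space can run
 * GSS_Accept_sec_context on them.
 */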
static void rsi_request(struct cache_detail *cd,
                        struct cache_head *h,
                        char **bpp, int *blen)
{
        struct rsi *rsii = container_of(h, struct rsi, h);

        qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
        qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
        (*bpp)[-1] = '\n';
        WARN_ONCE(*blen < 0,
                  "RPCSEC/GSS credential too large - please use gssproxy\n");
}
static int rsi_parse(struct cache_detail *cd,
                     char *mesg, int mlen)
{
        /* context token expiry major minor context token */
        char *buf = mesg;
        char *ep;
        int len;
        struct rsi rsii, *rsip = NULL;
        time64_t expiry;
        int status = -EINVAL;

        memset(&rsii, 0, sizeof(rsii));

        /* handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsii.in_handle, buf, len))
                goto out;

        /* token */
        len = qword_get(&mesg, buf, mlen);
        status = -EINVAL;
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsii.in_token, buf, len))
                goto out;

        rsip = rsi_lookup(cd, &rsii);
        if (!rsip)
                goto out;

        /* expiry */
        status = get_expiry(&mesg, &expiry);
        if (status)
                goto out;

        /* major/minor */
        status = -EINVAL;
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0)
                goto out;
        rsii.major_status = simple_strtoul(buf, &ep, 10);
        if (*ep)
                goto out;
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0)
                goto out;
        rsii.minor_status = simple_strtoul(buf, &ep, 10);
        if (*ep)
                goto out;

        /* out_handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsii.out_handle, buf, len))
                goto out;

        /* out_token */
        len = qword_get(&mesg, buf, mlen);
        status = -EINVAL;
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsii.out_token, buf, len))
                goto out;

        rsii.h.expiry_time = expiry;
        rsip = rsi_update(cd, &rsii, rsip);
        status = 0;
out:
        rsi_free(&rsii);
        if (rsip)
                cache_put(&rsip->h, cd);
        return status;
}
static const struct cache_detail rsi_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSI_HASHMAX,
        .name           = "auth.rpcsec.init",
        .cache_put      = rsi_put,
        .cache_upcall   = rsi_upcall,
        .cache_request  = rsi_request,
        .cache_parse    = rsi_parse,
        .match          = rsi_match,
        .init           = rsi_init,
        .update         = update_rsi,
        .alloc          = rsi_alloc,
};
static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
{
        struct cache_head *ch;
        int hash = rsi_hash(item);

        ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
        if (ch)
                return container_of(ch, struct rsi, h);
        return NULL;
}
static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old)
{
        struct cache_head *ch;
        int hash = rsi_hash(new);

        ch = sunrpc_cache_update(cd, &new->h,
                                 &old->h, hash);
        if (ch)
                return container_of(ch, struct rsi, h);
        return NULL;
}
/*
 * The rpcsec_context cache is used to store a context that is
 * used in data exchange.
 * The key is a context handle. The content is:
 *  uid, gidlist, mechanism, service-set, mech-specific-data
 */

#define RSC_HASHBITS    10
#define RSC_HASHMAX     (1<<RSC_HASHBITS)

#define GSS_SEQ_WIN     128
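
/*
 * GSS_SEQ_WIN is the compile-time sequence window mentioned at the top of
 * this file: gss_check_seq_num() only accepts a Call whose sequence number
 * lies within GSS_SEQ_WIN of the highest number seen so far.
 */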
struct gss_svc_seq_data {
        /* highest seq number seen so far: */
        u32                     sd_max;
        /* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
         * sd_win is nonzero iff sequence number i has been seen already: */
        unsigned long           sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
        spinlock_t              sd_lock;
};
struct rsc {
        struct cache_head       h;
        struct xdr_netobj       handle;
        struct svc_cred         cred;
        struct gss_svc_seq_data seqdata;
        struct gss_ctx          *mechctx;
        struct rcu_head         rcu_head;
};
static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item);
static void rsc_free(struct rsc *rsci)
{
        kfree(rsci->handle.data);
        if (rsci->mechctx)
                gss_delete_sec_context(&rsci->mechctx);
        free_svc_cred(&rsci->cred);
}
static void rsc_free_rcu(struct rcu_head *head)
{
        struct rsc *rsci = container_of(head, struct rsc, rcu_head);

        kfree(rsci->handle.data);
        kfree(rsci);
}
static void rsc_put(struct kref *ref)
{
        struct rsc *rsci = container_of(ref, struct rsc, h.ref);

        if (rsci->mechctx)
                gss_delete_sec_context(&rsci->mechctx);
        free_svc_cred(&rsci->cred);
        call_rcu(&rsci->rcu_head, rsc_free_rcu);
}
static inline int
rsc_hash(struct rsc *rsci)
{
        return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
}
static int
rsc_match(struct cache_head *a, struct cache_head *b)
{
        struct rsc *new = container_of(a, struct rsc, h);
        struct rsc *tmp = container_of(b, struct rsc, h);

        return netobj_equal(&new->handle, &tmp->handle);
}
static void
rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
{
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);

        new->handle.len = tmp->handle.len;
        tmp->handle.len = 0;
        new->handle.data = tmp->handle.data;
        tmp->handle.data = NULL;
        new->mechctx = NULL;
        init_svc_cred(&new->cred);
}
static void
update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
{
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);

        new->mechctx = tmp->mechctx;
        tmp->mechctx = NULL;
        memset(&new->seqdata, 0, sizeof(new->seqdata));
        spin_lock_init(&new->seqdata.sd_lock);
        new->cred = tmp->cred;
        init_svc_cred(&tmp->cred);
}
static struct cache_head *
rsc_alloc(void)
{
        struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);

        if (rsci)
                return &rsci->h;
        return NULL;
}
static int rsc_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return -EINVAL;
}
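
/*
 * Parse a context downcall from user-space. The fields arrive in the order
 * shown in the comment below: context handle and expiry, then (unless the
 * entry is negative) uid, gid, supplementary gids, mechanism name,
 * mech-specific context data, and finally the client principal name.
 */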
static int rsc_parse(struct cache_detail *cd,
                     char *mesg, int mlen)
{
        /* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
        char *buf = mesg;
        int id;
        int len, rv;
        struct rsc rsci, *rscp = NULL;
        time64_t expiry;
        int status = -EINVAL;
        struct gss_api_mech *gm = NULL;

        memset(&rsci, 0, sizeof(rsci));
        /* context handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsci.handle, buf, len))
                goto out;

        /* expiry */
        status = get_expiry(&mesg, &expiry);
        if (status)
                goto out;

        status = -EINVAL;
        rscp = rsc_lookup(cd, &rsci);
        if (!rscp)
                goto out;

        /* uid, or NEGATIVE */
        rv = get_int(&mesg, &id);
        if (rv == -EINVAL)
                goto out;
        if (rv == -ENOENT)
                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
        else {
                int N, i;

                /*
                 * NOTE: we skip uid_valid()/gid_valid() checks here:
                 * instead, -1 ids are later mapped to the
                 * (export-specific) anonymous id by nfsd_setuser.
                 *
                 * (But supplementary gids get no such special
                 * treatment so are checked for validity here.)
                 */
                /* uid */
                rsci.cred.cr_uid = make_kuid(current_user_ns(), id);

                /* gid */
                if (get_int(&mesg, &id))
                        goto out;
                rsci.cred.cr_gid = make_kgid(current_user_ns(), id);

                /* number of additional gids */
                if (get_int(&mesg, &N))
                        goto out;
                if (N < 0 || N > NGROUPS_MAX)
                        goto out;
                status = -ENOMEM;
                rsci.cred.cr_group_info = groups_alloc(N);
                if (rsci.cred.cr_group_info == NULL)
                        goto out;

                /* gids */
                status = -EINVAL;
                for (i = 0; i < N; i++) {
                        kgid_t kgid;

                        if (get_int(&mesg, &id))
                                goto out;
                        kgid = make_kgid(current_user_ns(), id);
                        if (!gid_valid(kgid))
                                goto out;
                        rsci.cred.cr_group_info->gid[i] = kgid;
                }
                groups_sort(rsci.cred.cr_group_info);

                /* mech name */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;
                gm = rsci.cred.cr_gss_mech = gss_mech_get_by_name(buf);
                status = -EOPNOTSUPP;
                if (!gm)
                        goto out;

                status = -EINVAL;
                /* mech-specific data: */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;
                status = gss_import_sec_context(buf, len, gm, &rsci.mechctx,
                                                NULL, GFP_KERNEL);
                if (status)
                        goto out;

                /* get client name */
                len = qword_get(&mesg, buf, mlen);
                if (len > 0) {
                        rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
                        if (!rsci.cred.cr_principal) {
                                status = -ENOMEM;
                                goto out;
                        }
                }
        }
        rsci.h.expiry_time = expiry;
        rscp = rsc_update(cd, &rsci, rscp);
        status = 0;
out:
        rsc_free(&rsci);
        if (rscp)
                cache_put(&rscp->h, cd);
        return status;
}
static const struct cache_detail rsc_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSC_HASHMAX,
        .name           = "auth.rpcsec.context",
        .cache_put      = rsc_put,
        .cache_upcall   = rsc_upcall,
        .cache_parse    = rsc_parse,
        .match          = rsc_match,
        .init           = rsc_init,
        .update         = update_rsc,
        .alloc          = rsc_alloc,
};
static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
{
        struct cache_head *ch;
        int hash = rsc_hash(item);

        ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
        if (ch)
                return container_of(ch, struct rsc, h);
        return NULL;
}
static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old)
{
        struct cache_head *ch;
        int hash = rsc_hash(new);

        ch = sunrpc_cache_update(cd, &new->h,
                                 &old->h, hash);
        if (ch)
                return container_of(ch, struct rsc, h);
        return NULL;
}
static struct rsc *
gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
{
        struct rsc rsci;
        struct rsc *found;

        memset(&rsci, 0, sizeof(rsci));
        if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
                return NULL;
        found = rsc_lookup(cd, &rsci);
        rsc_free(&rsci);
        if (!found)
                return NULL;
        if (cache_check(cd, &found->h, NULL))
                return NULL;
        return found;
}
/**
 * gss_check_seq_num - GSS sequence number window check
 * @rqstp: RPC Call to use when reporting errors
 * @rsci: cached GSS context state (updated on return)
 * @seq_num: sequence number to check
 *
 * Implements sequence number algorithm as specified in
 * RFC 2203, Section 5.3.3.1 "Context Management".
 *
 * Return values:
 *   %true: @rqstp's GSS sequence number is inside the window
 *   %false: @rqstp's GSS sequence number is outside the window
 */
static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
                              u32 seq_num)
{
        struct gss_svc_seq_data *sd = &rsci->seqdata;
        bool result = false;

        spin_lock(&sd->sd_lock);
        if (seq_num > sd->sd_max) {
                if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
                        memset(sd->sd_win, 0, sizeof(sd->sd_win));
                        sd->sd_max = seq_num;
                } else while (sd->sd_max < seq_num) {
                        sd->sd_max++;
                        __clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
                }
                __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
                goto ok;
        } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
                goto toolow;
        }
        if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
                goto alreadyseen;

ok:
        result = true;
out:
        spin_unlock(&sd->sd_lock);
        return result;

toolow:
        trace_rpcgss_svc_seqno_low(rqstp, seq_num,
                                   sd->sd_max - GSS_SEQ_WIN,
                                   sd->sd_max);
        goto out;
alreadyseen:
        trace_rpcgss_svc_seqno_seen(rqstp, seq_num);
        goto out;
}
/*
 * Decode and verify a Call's verifier field. For RPC_AUTH_GSS Calls,
 * the body of this field contains a variable length checksum.
 *
 * GSS-specific auth_stat values are mandated by RFC 2203.
 */
static int
svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
                          __be32 *rpcstart, struct rpc_gss_wire_cred *gc)
{
        struct xdr_stream *xdr = &rqstp->rq_arg_stream;
        struct gss_ctx *ctx_id = rsci->mechctx;
        u32 flavor, maj_stat;
        struct xdr_buf rpchdr;
        struct xdr_netobj checksum;
        struct kvec iov;

        /*
         * Compute the checksum of the incoming Call from the
         * XID field to credential field:
         */
        iov.iov_base = rpcstart;
        iov.iov_len = (u8 *)xdr->p - (u8 *)rpcstart;
        xdr_buf_from_iov(&iov, &rpchdr);

        /* Call's verf field: */
        if (xdr_stream_decode_opaque_auth(xdr, &flavor,
                                          (void **)&checksum.data,
                                          &checksum.len) < 0) {
                rqstp->rq_auth_stat = rpc_autherr_badverf;
                return SVC_DENIED;
        }
        if (flavor != RPC_AUTH_GSS) {
                rqstp->rq_auth_stat = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        if (rqstp->rq_deferred)
                return SVC_OK;
        maj_stat = gss_verify_mic(ctx_id, &rpchdr, &checksum);
        if (maj_stat != GSS_S_COMPLETE) {
                trace_rpcgss_svc_mic(rqstp, maj_stat);
                rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
                return SVC_DENIED;
        }

        if (gc->gc_seq > MAXSEQ) {
                trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
                rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
                return SVC_DENIED;
        }
        if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
                return SVC_DROP;
        return SVC_OK;
}
/*
 * Construct and encode a Reply's verifier field. The verifier's body
 * field contains a variable-length checksum of the GSS sequence
 * number.
 */
static bool
svcauth_gss_encode_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
{
        struct gss_svc_data *gsd = rqstp->rq_auth_data;
        u32 maj_stat;
        struct xdr_buf verf_data;
        struct xdr_netobj checksum;
        struct kvec iov;

        gsd->gsd_seq_num = cpu_to_be32(seq);
        iov.iov_base = &gsd->gsd_seq_num;
        iov.iov_len = XDR_UNIT;
        xdr_buf_from_iov(&iov, &verf_data);

        checksum.data = gsd->gsd_scratch;
        maj_stat = gss_get_mic(ctx_id, &verf_data, &checksum);
        if (maj_stat != GSS_S_COMPLETE)
                goto bad_mic;

        return xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream, RPC_AUTH_GSS,
                                             checksum.data, checksum.len) > 0;

bad_mic:
        trace_rpcgss_svc_get_mic(rqstp, maj_stat);
        return false;
}
struct gss_domain {
        struct auth_domain      h;
        u32                     pseudoflavor;
};
static struct auth_domain *
find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
{
        char *name;

        name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
        if (!name)
                return NULL;
        return auth_domain_find(name);
}
static struct auth_ops svcauthops_gss;
u32 svcauth_gss_flavor(struct auth_domain *dom)
{
        struct gss_domain *gd = container_of(dom, struct gss_domain, h);

        return gd->pseudoflavor;
}

EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
struct auth_domain *
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char *name)
{
        struct gss_domain *new;
        struct auth_domain *test;
        int stat = -ENOMEM;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                goto out;
        kref_init(&new->h.ref);
        new->h.name = kstrdup(name, GFP_KERNEL);
        if (!new->h.name)
                goto out_free_dom;
        new->h.flavour = &svcauthops_gss;
        new->pseudoflavor = pseudoflavor;

        test = auth_domain_lookup(name, &new->h);
        if (test != &new->h) {
                pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
                        name);
                stat = -EADDRINUSE;
                auth_domain_put(test);
                goto out_free_name;
        }
        return test;

out_free_name:
        kfree(new->h.name);
out_free_dom:
        kfree(new);
out:
        return ERR_PTR(stat);
}
EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
/*
 * RFC 2203, Section 5.3.2.2
 *
 *	struct rpc_gss_integ_data {
 *		opaque databody_integ<>;
 *		opaque checksum<>;
 *	};
 *
 *	struct rpc_gss_data_t {
 *		unsigned int seq_num;
 *		proc_req_arg_t arg;
 *	};
 */
static noinline_for_stack int
svcauth_gss_unwrap_integ(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
{
        struct gss_svc_data *gsd = rqstp->rq_auth_data;
        struct xdr_stream *xdr = &rqstp->rq_arg_stream;
        u32 len, offset, seq_num, maj_stat;
        struct xdr_buf *buf = xdr->buf;
        struct xdr_buf databody_integ;
        struct xdr_netobj checksum;

        /* Did we already verify the signature on the original pass through? */
        if (rqstp->rq_deferred)
                return 0;

        if (xdr_stream_decode_u32(xdr, &len) < 0)
                goto unwrap_failed;
        offset = xdr_stream_pos(xdr);
        if (xdr_buf_subsegment(buf, &databody_integ, offset, len))
                goto unwrap_failed;

        /*
         * The xdr_stream now points to the @seq_num field. The next
         * XDR data item is the @arg field, which contains the clear
         * text RPC program payload. The checksum, which follows the
         * @arg field, is located and decoded without updating the
         * xdr_stream.
         */

        offset += len;
        if (xdr_decode_word(buf, offset, &checksum.len))
                goto unwrap_failed;
        if (checksum.len > sizeof(gsd->gsd_scratch))
                goto unwrap_failed;
        checksum.data = gsd->gsd_scratch;
        if (read_bytes_from_xdr_buf(buf, offset + XDR_UNIT, checksum.data,
                                    checksum.len))
                goto unwrap_failed;

        maj_stat = gss_verify_mic(ctx, &databody_integ, &checksum);
        if (maj_stat != GSS_S_COMPLETE)
                goto bad_mic;

        /* The received seqno is protected by the checksum. */
        if (xdr_stream_decode_u32(xdr, &seq_num) < 0)
                goto unwrap_failed;
        if (seq_num != seq)
                goto bad_seqno;

        xdr_truncate_decode(xdr, XDR_UNIT + checksum.len);
        return 0;

unwrap_failed:
        trace_rpcgss_svc_unwrap_failed(rqstp);
        return -EINVAL;
bad_seqno:
        trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num);
        return -EINVAL;
bad_mic:
        trace_rpcgss_svc_mic(rqstp, maj_stat);
        return -EINVAL;
}
/*
 * RFC 2203, Section 5.3.2.3
 *
 *	struct rpc_gss_priv_data {
 *		opaque databody_priv<>
 *	};
 *
 *	struct rpc_gss_data_t {
 *		unsigned int seq_num;
 *		proc_req_arg_t arg;
 *	};
 */
static noinline_for_stack int
svcauth_gss_unwrap_priv(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
{
        struct xdr_stream *xdr = &rqstp->rq_arg_stream;
        u32 len, maj_stat, seq_num, offset;
        struct xdr_buf *buf = xdr->buf;
        unsigned int saved_len;

        if (xdr_stream_decode_u32(xdr, &len) < 0)
                goto unwrap_failed;
        if (rqstp->rq_deferred) {
                /* Already decrypted last time through! The sequence number
                 * check at out_seq is unnecessary but harmless: */
                goto out_seq;
        }
        if (len > xdr_stream_remaining(xdr))
                goto unwrap_failed;
        offset = xdr_stream_pos(xdr);

        saved_len = buf->len;
        maj_stat = gss_unwrap(ctx, offset, offset + len, buf);
        if (maj_stat != GSS_S_COMPLETE)
                goto bad_unwrap;
        xdr->nwords -= XDR_QUADLEN(saved_len - buf->len);

out_seq:
        /* gss_unwrap() decrypted the sequence number. */
        if (xdr_stream_decode_u32(xdr, &seq_num) < 0)
                goto unwrap_failed;
        if (seq_num != seq)
                goto bad_seqno;
        return 0;

unwrap_failed:
        trace_rpcgss_svc_unwrap_failed(rqstp);
        return -EINVAL;
bad_seqno:
        trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num);
        return -EINVAL;
bad_unwrap:
        trace_rpcgss_svc_unwrap(rqstp, maj_stat);
        return -EINVAL;
}
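
/*
 * Map the authenticated GSS context to an export auth_domain. The GSS
 * principal is recorded in rqstp->rq_gssclient, and svcauth_unix_set_client()
 * is still called for the address-based domain, so both styles of export
 * specification keep working.
 */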
static enum svc_auth_status
svcauth_gss_set_client(struct svc_rqst *rqstp)
{
        struct gss_svc_data *svcdata = rqstp->rq_auth_data;
        struct rsc *rsci = svcdata->rsci;
        struct rpc_gss_wire_cred *gc = &svcdata->clcred;
        int stat;

        rqstp->rq_auth_stat = rpc_autherr_badcred;

        /*
         * A gss export can be specified either by:
         * 	export	*(sec=krb5,rw)
         * or by
         * 	export gss/krb5(rw)
         * The latter is deprecated; but for backwards compatibility reasons
         * the nfsd code will still fall back on trying it if the former
         * doesn't work; so we try to make both available to nfsd, below.
         */
        rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
        if (rqstp->rq_gssclient == NULL)
                return SVC_DENIED;
        stat = svcauth_unix_set_client(rqstp);
        if (stat == SVC_DROP || stat == SVC_CLOSE)
                return stat;

        rqstp->rq_auth_stat = rpc_auth_ok;
        return SVC_OK;
}
static bool
svcauth_gss_proc_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
                           struct xdr_netobj *out_handle, int *major_status,
                           u32 seq_num)
{
        struct xdr_stream *xdr = &rqstp->rq_res_stream;
        struct rsc *rsci;
        bool rc;

        if (*major_status != GSS_S_COMPLETE)
                goto null_verifier;
        rsci = gss_svc_searchbyctx(cd, out_handle);
        if (rsci == NULL) {
                *major_status = GSS_S_NO_CONTEXT;
                goto null_verifier;
        }

        rc = svcauth_gss_encode_verf(rqstp, rsci->mechctx, seq_num);
        cache_put(&rsci->h, cd);
        return rc;

null_verifier:
        return xdr_stream_encode_opaque_auth(xdr, RPC_AUTH_NULL, NULL, 0) > 0;
}
static void gss_free_in_token_pages(struct gssp_in_token *in_token)
{
        int i = 0;

        while (in_token->pages[i])
                put_page(in_token->pages[i++]);
        kfree(in_token->pages);
        in_token->pages = NULL;
}
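
/*
 * Copy the client's RPCSEC_GSS_INIT token out of the request into a page
 * array so that it can be handed to gss-proxy in a single upcall, however
 * large the token is.
 */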
static int gss_read_proxy_verf(struct svc_rqst *rqstp,
                               struct rpc_gss_wire_cred *gc,
                               struct xdr_netobj *in_handle,
                               struct gssp_in_token *in_token)
{
        struct xdr_stream *xdr = &rqstp->rq_arg_stream;
        unsigned int length, pgto_offs, pgfrom_offs;
        int pages, i, pgto, pgfrom;
        size_t to_offs, from_offs;
        u32 inlen;

        if (dup_netobj(in_handle, &gc->gc_ctx))
                return SVC_CLOSE;

        /*
         *  RFC 2203 Section 5.2.2
         *
         *	struct rpc_gss_init_arg {
         *		opaque gss_token<>;
         *	};
         */
        if (xdr_stream_decode_u32(xdr, &inlen) < 0)
                goto out_denied_free;
        if (inlen > xdr_stream_remaining(xdr))
                goto out_denied_free;

        pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
        in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
        if (!in_token->pages)
                goto out_denied_free;
        in_token->page_base = 0;
        in_token->page_len = inlen;
        for (i = 0; i < pages; i++) {
                in_token->pages[i] = alloc_page(GFP_KERNEL);
                if (!in_token->pages[i]) {
                        gss_free_in_token_pages(in_token);
                        goto out_denied_free;
                }
        }

        length = min_t(unsigned int, inlen, (char *)xdr->end - (char *)xdr->p);
        memcpy(page_address(in_token->pages[0]), xdr->p, length);
        inlen -= length;

        to_offs = length;
        from_offs = rqstp->rq_arg.page_base;
        while (inlen) {
                pgto = to_offs >> PAGE_SHIFT;
                pgfrom = from_offs >> PAGE_SHIFT;
                pgto_offs = to_offs & ~PAGE_MASK;
                pgfrom_offs = from_offs & ~PAGE_MASK;

                length = min_t(unsigned int, inlen,
                               min_t(unsigned int, PAGE_SIZE - pgto_offs,
                                     PAGE_SIZE - pgfrom_offs));
                memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
                       page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
                       length);

                to_offs += length;
                from_offs += length;
                inlen -= length;
        }
        return 0;

out_denied_free:
        kfree(in_handle->data);
        return SVC_DENIED;
}
/*
 * RFC 2203, Section 5.2.3.1.
 *
 *	struct rpc_gss_init_res {
 *		opaque handle<>;
 *		unsigned int gss_major;
 *		unsigned int gss_minor;
 *		unsigned int seq_window;
 *		opaque gss_token<>;
 *	};
 */
static bool
svcxdr_encode_gss_init_res(struct xdr_stream *xdr,
                           struct xdr_netobj *handle,
                           struct xdr_netobj *gss_token,
                           unsigned int major_status,
                           unsigned int minor_status, u32 seq_num)
{
        if (xdr_stream_encode_opaque(xdr, handle->data, handle->len) < 0)
                return false;
        if (xdr_stream_encode_u32(xdr, major_status) < 0)
                return false;
        if (xdr_stream_encode_u32(xdr, minor_status) < 0)
                return false;
        if (xdr_stream_encode_u32(xdr, seq_num) < 0)
                return false;
        if (xdr_stream_encode_opaque(xdr, gss_token->data, gss_token->len) < 0)
                return false;
        return true;
}
/*
 * Having read the cred already and found we're in the context
 * initiation case, read the verifier and initiate (or check the results
 * of) upcalls to userspace for help with context initiation. If
 * the upcall results are available, write the verifier and result.
 * Otherwise, drop the request pending an answer to the upcall.
 */
static int
svcauth_gss_legacy_init(struct svc_rqst *rqstp,
                        struct rpc_gss_wire_cred *gc)
{
        struct xdr_stream *xdr = &rqstp->rq_arg_stream;
        struct rsi *rsip, rsikey;
        __be32 *p;
        u32 len;
        int ret;
        struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);

        memset(&rsikey, 0, sizeof(rsikey));
        if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
                return SVC_CLOSE;

        /*
         *  RFC 2203 Section 5.2.2
         *
         *	struct rpc_gss_init_arg {
         *		opaque gss_token<>;
         *	};
         */
        if (xdr_stream_decode_u32(xdr, &len) < 0) {
                kfree(rsikey.in_handle.data);
                return SVC_DENIED;
        }
        p = xdr_inline_decode(xdr, len);
        if (!p) {
                kfree(rsikey.in_handle.data);
                return SVC_DENIED;
        }
        rsikey.in_token.data = kmalloc(len, GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(rsikey.in_token.data)) {
                kfree(rsikey.in_handle.data);
                return SVC_CLOSE;
        }
        memcpy(rsikey.in_token.data, p, len);
        rsikey.in_token.len = len;

        /* Perform upcall, or find upcall result: */
        rsip = rsi_lookup(sn->rsi_cache, &rsikey);
        rsi_free(&rsikey);
        if (!rsip)
                return SVC_CLOSE;
        if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
                /* No upcall result: */
                return SVC_CLOSE;

        ret = SVC_CLOSE;
        if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &rsip->out_handle,
                                        &rsip->major_status, GSS_SEQ_WIN))
                goto out;
        if (!svcxdr_set_accept_stat(rqstp))
                goto out;
        if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &rsip->out_handle,
                                        &rsip->out_token, rsip->major_status,
                                        rsip->minor_status, GSS_SEQ_WIN))
                goto out;
        ret = SVC_COMPLETE;
out:
        cache_put(&rsip->h, sn->rsi_cache);
        return ret;
}
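
/*
 * Stash the context accepted by gss-proxy in the rsc cache under a freshly
 * generated handle, so that subsequent RPC_GSS_PROC_DATA requests can find
 * it with gss_svc_searchbyctx().
 */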
static int gss_proxy_save_rsc(struct cache_detail *cd,
                              struct gssp_upcall_data *ud,
                              uint64_t *handle)
{
        struct rsc rsci, *rscp = NULL;
        static atomic64_t ctxhctr;
        long long ctxh;
        struct gss_api_mech *gm = NULL;
        time64_t expiry;
        int status;

        memset(&rsci, 0, sizeof(rsci));
        /* context handle */
        status = -ENOMEM;
        /* the handle needs to be just a unique id,
         * use a static counter */
        ctxh = atomic64_inc_return(&ctxhctr);

        /* make a copy for the caller */
        *handle = ctxh;

        /* make a copy for the rsc cache */
        if (dup_to_netobj(&rsci.handle, (char *)handle, sizeof(uint64_t)))
                goto out;
        rscp = rsc_lookup(cd, &rsci);
        if (!rscp)
                goto out;

        /* creds */
        if (!ud->found_creds) {
                /* userspace seem buggy, we should always get at least a
                 * mapping to nobody */
                goto out;
        } else {
                struct timespec64 boot;

                /* steal creds */
                rsci.cred = ud->creds;
                memset(&ud->creds, 0, sizeof(struct svc_cred));

                status = -EOPNOTSUPP;
                /* get mech handle from OID */
                gm = gss_mech_get_by_OID(&ud->mech_oid);
                if (!gm)
                        goto out;
                rsci.cred.cr_gss_mech = gm;

                status = -EINVAL;
                /* mech-specific data: */
                status = gss_import_sec_context(ud->out_handle.data,
                                                ud->out_handle.len,
                                                gm, &rsci.mechctx,
                                                &expiry, GFP_KERNEL);
                if (status)
                        goto out;

                getboottime64(&boot);
                expiry -= boot.tv_sec;
        }

        rsci.h.expiry_time = expiry;
        rscp = rsc_update(cd, &rsci, rscp);
        status = 0;
out:
        rsc_free(&rsci);
        if (rscp)
                cache_put(&rscp->h, cd);
        return status;
}
static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
                                  struct rpc_gss_wire_cred *gc)
{
        struct xdr_netobj cli_handle;
        struct gssp_upcall_data ud;
        uint64_t handle;
        int status;
        int ret;
        struct net *net = SVC_NET(rqstp);
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        memset(&ud, 0, sizeof(ud));
        ret = gss_read_proxy_verf(rqstp, gc, &ud.in_handle, &ud.in_token);
        if (ret)
                return ret;

        ret = SVC_CLOSE;

        /* Perform synchronous upcall to gss-proxy */
        status = gssp_accept_sec_context_upcall(net, &ud);
        if (status)
                goto out;

        trace_rpcgss_svc_accept_upcall(rqstp, ud.major_status, ud.minor_status);

        switch (ud.major_status) {
        case GSS_S_CONTINUE_NEEDED:
                cli_handle = ud.out_handle;
                break;
        case GSS_S_COMPLETE:
                status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
                if (status)
                        goto out;
                cli_handle.data = (u8 *)&handle;
                cli_handle.len = sizeof(handle);
                break;
        default:
                goto out;
        }

        if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &cli_handle,
                                        &ud.major_status, GSS_SEQ_WIN))
                goto out;
        if (!svcxdr_set_accept_stat(rqstp))
                goto out;
        if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &cli_handle,
                                        &ud.out_token, ud.major_status,
                                        ud.minor_status, GSS_SEQ_WIN))
                goto out;
        ret = SVC_COMPLETE;
out:
        gss_free_in_token_pages(&ud.in_token);
        gssp_free_upcall_data(&ud);
        return ret;
}
/*
 * Try to set the sn->use_gss_proxy variable to a new value. We only allow
 * it to be changed if it's currently undefined (-1). If it's any other value
 * then return -EBUSY unless the type wouldn't have changed anyway.
 */
static int set_gss_proxy(struct net *net, int type)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int ret;

        WARN_ON_ONCE(type != 0 && type != 1);
        ret = cmpxchg(&sn->use_gss_proxy, -1, type);
        if (ret != -1 && ret != type)
                return -EBUSY;
        return 0;
}

static bool use_gss_proxy(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        /* If use_gss_proxy is still undefined, then try to disable it */
        if (sn->use_gss_proxy == -1)
                set_gss_proxy(net, 0);
        return sn->use_gss_proxy;
}
static noinline_for_stack int
svcauth_gss_proc_init(struct svc_rqst *rqstp, struct rpc_gss_wire_cred *gc)
{
        struct xdr_stream *xdr = &rqstp->rq_arg_stream;
        u32 flavor, len;
        void *body;

        /* Call's verf field: */
        if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
                return SVC_GARBAGE;
        if (flavor != RPC_AUTH_NULL || len != 0) {
                rqstp->rq_auth_stat = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0) {
                rqstp->rq_auth_stat = rpc_autherr_badcred;
                return SVC_DENIED;
        }

        if (!use_gss_proxy(SVC_NET(rqstp)))
                return svcauth_gss_legacy_init(rqstp, gc);
        return svcauth_gss_proxy_init(rqstp, gc);
}

#ifdef CONFIG_PROC_FS
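
/*
 * /proc/net/rpc/use-gss-proxy: writing "1" routes context initiation through
 * gss-proxy instead of the legacy auth.rpcsec.init upcall; reading it back
 * reports the current setting.
 */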
static ssize_t write_gssp(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        struct net *net = pde_data(file_inode(file));
        char tbuf[20];
        unsigned long i;
        int res;

        if (*ppos || count > sizeof(tbuf)-1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;

        tbuf[count] = 0;
        res = kstrtoul(tbuf, 0, &i);
        if (res)
                return res;
        if (i != 1)
                return -EINVAL;
        res = set_gssp_clnt(net);
        if (res)
                return res;
        res = set_gss_proxy(net, 1);
        if (res)
                return res;
        return count;
}

static ssize_t read_gssp(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        struct net *net = pde_data(file_inode(file));
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        unsigned long p = *ppos;
        char tbuf[10];
        size_t len;

        snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
        len = strlen(tbuf);
        if (p >= len)
                return 0;
        len -= p;
        if (len > count)
                len = count;
        if (copy_to_user(buf, (void *)(tbuf+p), len))
                return -EFAULT;
        *ppos += len;
        return len;
}

static const struct proc_ops use_gss_proxy_proc_ops = {
        .proc_open      = nonseekable_open,
        .proc_write     = write_gssp,
        .proc_read      = read_gssp,
};

static int create_use_gss_proxy_proc_entry(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct proc_dir_entry **p = &sn->use_gssp_proc;

        sn->use_gss_proxy = -1;
        *p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
                              sn->proc_net_rpc,
                              &use_gss_proxy_proc_ops, net);
        return *p ? 0 : -ENOMEM;
}

static void destroy_use_gss_proxy_proc_entry(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        if (sn->use_gssp_proc) {
                remove_proc_entry("use-gss-proxy", sn->proc_net_rpc);
                clear_gssp_clnt(sn);
        }
}

static ssize_t read_gss_krb5_enctypes(struct file *file, char __user *buf,
                                      size_t count, loff_t *ppos)
{
        struct rpcsec_gss_oid oid = {
                .len    = 9,
                .data   = "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02",
        };
        struct gss_api_mech *mech;
        ssize_t ret;

        mech = gss_mech_get_by_OID(&oid);
        if (!mech)
                return 0;
        if (!mech->gm_upcall_enctypes) {
                gss_mech_put(mech);
                return 0;
        }

        ret = simple_read_from_buffer(buf, count, ppos,
                                      mech->gm_upcall_enctypes,
                                      strlen(mech->gm_upcall_enctypes));
        gss_mech_put(mech);
        return ret;
}

static const struct proc_ops gss_krb5_enctypes_proc_ops = {
        .proc_open      = nonseekable_open,
        .proc_read      = read_gss_krb5_enctypes,
};

static int create_krb5_enctypes_proc_entry(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        sn->gss_krb5_enctypes =
                proc_create_data("gss_krb5_enctypes", S_IFREG | 0444,
                                 sn->proc_net_rpc, &gss_krb5_enctypes_proc_ops,
                                 net);
        return sn->gss_krb5_enctypes ? 0 : -ENOMEM;
}

static void destroy_krb5_enctypes_proc_entry(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        if (sn->gss_krb5_enctypes)
                remove_proc_entry("gss_krb5_enctypes", sn->proc_net_rpc);
}

#else /* CONFIG_PROC_FS */

static int create_use_gss_proxy_proc_entry(struct net *net)
{
        return 0;
}

static void destroy_use_gss_proxy_proc_entry(struct net *net) {}

static int create_krb5_enctypes_proc_entry(struct net *net)
{
        return 0;
}

static void destroy_krb5_enctypes_proc_entry(struct net *net) {}

#endif /* CONFIG_PROC_FS */
/*
 * The Call's credential body should contain a struct rpc_gss_cred_t.
 *
 * RFC 2203 Section 5
 *
 *	struct rpc_gss_cred_t {
 *		union switch (unsigned int version) {
 *		case RPCSEC_GSS_VERS_1:
 *			struct {
 *				rpc_gss_proc_t gss_proc;
 *				unsigned int seq_num;
 *				rpc_gss_service_t service;
 *				opaque handle<>;
 *			} rpc_gss_cred_vers_1_t;
 *		}
 *	};
 */
static bool
svcauth_gss_decode_credbody(struct xdr_stream *xdr,
                            struct rpc_gss_wire_cred *gc,
                            __be32 **rpcstart)
{
        ssize_t handle_len;
        u32 body_len;
        __be32 *p;

        p = xdr_inline_decode(xdr, XDR_UNIT);
        if (!p)
                return false;
        /*
         * start of rpc packet is 7 u32's back from here:
         * xid direction rpcversion prog vers proc flavour
         */
        *rpcstart = p - 7;
        body_len = be32_to_cpup(p);
        if (body_len > RPC_MAX_AUTH_SIZE)
                return false;

        /* struct rpc_gss_cred_t */
        if (xdr_stream_decode_u32(xdr, &gc->gc_v) < 0)
                return false;
        if (xdr_stream_decode_u32(xdr, &gc->gc_proc) < 0)
                return false;
        if (xdr_stream_decode_u32(xdr, &gc->gc_seq) < 0)
                return false;
        if (xdr_stream_decode_u32(xdr, &gc->gc_svc) < 0)
                return false;
        handle_len = xdr_stream_decode_opaque_inline(xdr,
                                                     (void **)&gc->gc_ctx.data,
                                                     body_len);
        if (handle_len < 0)
                return false;
        if (body_len != XDR_UNIT * 5 + xdr_align_size(handle_len))
                return false;

        gc->gc_ctx.len = handle_len;
        return true;
}
/**
 * svcauth_gss_accept - Decode and validate incoming RPC_AUTH_GSS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *   %SVC_OK: Success
 *   %SVC_COMPLETE: GSS context lifetime event
 *   %SVC_DENIED: Credential or verifier is not valid
 *   %SVC_GARBAGE: Failed to decode credential or verifier
 *   %SVC_CLOSE: Temporary failure
 *
 * The rqstp->rq_auth_stat field is also set (see RFCs 2203 and 5531).
 */
static enum svc_auth_status
svcauth_gss_accept(struct svc_rqst *rqstp)
{
        struct gss_svc_data *svcdata = rqstp->rq_auth_data;
        __be32 *rpcstart;
        struct rpc_gss_wire_cred *gc;
        struct rsc *rsci = NULL;
        int ret;
        struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);

        rqstp->rq_auth_stat = rpc_autherr_badcred;
        if (!svcdata)
                svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
        if (!svcdata)
                goto auth_err;
        rqstp->rq_auth_data = svcdata;
        svcdata->gsd_databody_offset = 0;
        svcdata->rsci = NULL;
        gc = &svcdata->clcred;

        if (!svcauth_gss_decode_credbody(&rqstp->rq_arg_stream, gc, &rpcstart))
                goto auth_err;
        if (gc->gc_v != RPC_GSS_VERSION)
                goto auth_err;

        switch (gc->gc_proc) {
        case RPC_GSS_PROC_INIT:
        case RPC_GSS_PROC_CONTINUE_INIT:
                if (rqstp->rq_proc != 0)
                        goto auth_err;
                return svcauth_gss_proc_init(rqstp, gc);
        case RPC_GSS_PROC_DESTROY:
                if (rqstp->rq_proc != 0)
                        goto auth_err;
                fallthrough;
        case RPC_GSS_PROC_DATA:
                rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
                rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
                if (!rsci)
                        goto auth_err;
                switch (svcauth_gss_verify_header(rqstp, rsci, rpcstart, gc)) {
                case SVC_OK:
                        break;
                case SVC_DENIED:
                        goto auth_err;
                case SVC_DROP:
                        goto drop;
                }
                break;
        default:
                if (rqstp->rq_proc != 0)
                        goto auth_err;
                rqstp->rq_auth_stat = rpc_autherr_rejectedcred;
                goto auth_err;
        }

        /* now act upon the command: */
        switch (gc->gc_proc) {
        case RPC_GSS_PROC_DESTROY:
                if (!svcauth_gss_encode_verf(rqstp, rsci->mechctx, gc->gc_seq))
                        goto auth_err;
                if (!svcxdr_set_accept_stat(rqstp))
                        goto auth_err;
                /* Delete the entry from the cache_list and call cache_put */
                sunrpc_cache_unhash(sn->rsc_cache, &rsci->h);
                goto complete;
        case RPC_GSS_PROC_DATA:
                rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
                if (!svcauth_gss_encode_verf(rqstp, rsci->mechctx, gc->gc_seq))
                        goto auth_err;
                if (!svcxdr_set_accept_stat(rqstp))
                        goto auth_err;
                svcdata->gsd_databody_offset = xdr_stream_pos(&rqstp->rq_res_stream);
                rqstp->rq_cred = rsci->cred;
                get_group_info(rsci->cred.cr_group_info);
                rqstp->rq_auth_stat = rpc_autherr_badcred;
                switch (gc->gc_svc) {
                case RPC_GSS_SVC_NONE:
                        break;
                case RPC_GSS_SVC_INTEGRITY:
                        /* placeholders for body length and seq. number: */
                        xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
                        if (svcauth_gss_unwrap_integ(rqstp, gc->gc_seq,
                                                     rsci->mechctx))
                                goto garbage_args;
                        svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE);
                        break;
                case RPC_GSS_SVC_PRIVACY:
                        /* placeholders for body length and seq. number: */
                        xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
                        if (svcauth_gss_unwrap_priv(rqstp, gc->gc_seq,
                                                    rsci->mechctx))
                                goto garbage_args;
                        svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE * 2);
                        break;
                default:
                        goto auth_err;
                }
                svcdata->rsci = rsci;
                cache_get(&rsci->h);
                rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
                                                rsci->mechctx->mech_type,
                                                GSS_C_QOP_DEFAULT,
                                                gc->gc_svc);
                trace_rpcgss_svc_authenticate(rqstp, gc);
                ret = SVC_OK;
                goto out;
        }

garbage_args:
        xdr_truncate_encode(&rqstp->rq_res_stream, XDR_UNIT * 2);
        ret = SVC_GARBAGE;
        goto out;
auth_err:
        ret = SVC_DENIED;
        goto out;
complete:
        ret = SVC_COMPLETE;
        goto out;
drop:
        ret = SVC_DROP;
out:
        if (rsci)
                cache_put(&rsci->h, sn->rsc_cache);
        return ret;
}
static u32
svcauth_gss_prepare_to_wrap(struct svc_rqst *rqstp, struct gss_svc_data *gsd)
{
        u32 offset;

        /* Release can be called twice, but we only wrap once. */
        offset = gsd->gsd_databody_offset;
        gsd->gsd_databody_offset = 0;

        /* AUTH_ERROR replies are not wrapped. */
        if (rqstp->rq_auth_stat != rpc_auth_ok)
                return 0;

        /* Also don't wrap if the accept_stat is nonzero: */
        if (*rqstp->rq_accept_statp != rpc_success)
                return 0;

        return offset;
}
/*
 * RFC 2203, Section 5.3.2.2
 *
 *	struct rpc_gss_integ_data {
 *		opaque databody_integ<>;
 *		opaque checksum<>;
 *	};
 *
 *	struct rpc_gss_data_t {
 *		unsigned int seq_num;
 *		proc_req_arg_t arg;
 *	};
 *
 * The RPC Reply message has already been XDR-encoded. rq_res_stream
 * is now positioned so that the checksum can be written just past
 * the RPC Reply message.
 */
static int svcauth_gss_wrap_integ(struct svc_rqst *rqstp)
{
        struct gss_svc_data *gsd = rqstp->rq_auth_data;
        struct xdr_stream *xdr = &rqstp->rq_res_stream;
        struct rpc_gss_wire_cred *gc = &gsd->clcred;
        struct xdr_buf *buf = xdr->buf;
        struct xdr_buf databody_integ;
        struct xdr_netobj checksum;
        u32 offset, maj_stat;

        offset = svcauth_gss_prepare_to_wrap(rqstp, gsd);
        if (!offset)
                goto out;

        if (xdr_buf_subsegment(buf, &databody_integ, offset + XDR_UNIT,
                               buf->len - offset - XDR_UNIT))
                goto wrap_failed;
        /* Buffer space for these has already been reserved in
         * svcauth_gss_accept(). */
        if (xdr_encode_word(buf, offset, databody_integ.len))
                goto wrap_failed;
        if (xdr_encode_word(buf, offset + XDR_UNIT, gc->gc_seq))
                goto wrap_failed;

        checksum.data = gsd->gsd_scratch;
        maj_stat = gss_get_mic(gsd->rsci->mechctx, &databody_integ, &checksum);
        if (maj_stat != GSS_S_COMPLETE)
                goto bad_mic;

        if (xdr_stream_encode_opaque(xdr, checksum.data, checksum.len) < 0)
                goto wrap_failed;
        xdr_commit_encode(xdr);

out:
        return 0;

bad_mic:
        trace_rpcgss_svc_get_mic(rqstp, maj_stat);
        return -EINVAL;
wrap_failed:
        trace_rpcgss_svc_wrap_failed(rqstp);
        return -EINVAL;
}
/*
 * RFC 2203, Section 5.3.2.3
 *
 *	struct rpc_gss_priv_data {
 *		opaque databody_priv<>
 *	};
 *
 *	struct rpc_gss_data_t {
 *		unsigned int seq_num;
 *		proc_req_arg_t arg;
 *	};
 *
 * gss_wrap() expands the size of the RPC message payload in the
 * response buffer. The main purpose of svcauth_gss_wrap_priv()
 * is to ensure there is adequate space in the response buffer to
 * avoid overflow during the wrap.
 */
static int svcauth_gss_wrap_priv(struct svc_rqst *rqstp)
{
        struct gss_svc_data *gsd = rqstp->rq_auth_data;
        struct rpc_gss_wire_cred *gc = &gsd->clcred;
        struct xdr_buf *buf = &rqstp->rq_res;
        struct kvec *head = buf->head;
        struct kvec *tail = buf->tail;
        u32 offset, pad, maj_stat;
        __be32 *p;

        offset = svcauth_gss_prepare_to_wrap(rqstp, gsd);
        if (!offset)
                return 0;

        /*
         * Buffer space for this field has already been reserved
         * in svcauth_gss_accept(). Note that the GSS sequence
         * number is encrypted along with the RPC reply payload.
         */
        if (xdr_encode_word(buf, offset + XDR_UNIT, gc->gc_seq))
                goto wrap_failed;

        /*
         * If there is currently tail data, make sure there is
         * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
         * the page, and move the current tail data such that
         * there is RPC_MAX_AUTH_SIZE slack space available in
         * both the head and tail.
         */
        if (tail->iov_base) {
                if (tail->iov_base >= head->iov_base + PAGE_SIZE)
                        goto wrap_failed;
                if (tail->iov_base < head->iov_base)
                        goto wrap_failed;
                if (tail->iov_len + head->iov_len
                                + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
                        goto wrap_failed;
                memmove(tail->iov_base + RPC_MAX_AUTH_SIZE, tail->iov_base,
                        tail->iov_len);
                tail->iov_base += RPC_MAX_AUTH_SIZE;
        }
        /*
         * If there is no current tail data, make sure there is
         * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
         * allotted page, and set up tail information such that there
         * is RPC_MAX_AUTH_SIZE slack space available in both the
         * head and tail.
         */
        if (!tail->iov_base) {
                if (head->iov_len + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
                        goto wrap_failed;
                tail->iov_base = head->iov_base
                        + head->iov_len + RPC_MAX_AUTH_SIZE;
                tail->iov_len = 0;
        }

        maj_stat = gss_wrap(gsd->rsci->mechctx, offset + XDR_UNIT, buf,
                            buf->pages);
        if (maj_stat != GSS_S_COMPLETE)
                goto bad_wrap;

        /* Wrapping can change the size of databody_priv. */
        if (xdr_encode_word(buf, offset, buf->len - offset - XDR_UNIT))
                goto wrap_failed;
        pad = xdr_pad_size(buf->len - offset - XDR_UNIT);
        p = (__be32 *)(tail->iov_base + tail->iov_len);
        memset(p, 0, pad);
        tail->iov_len += pad;
        buf->len += pad;

        return 0;

wrap_failed:
        trace_rpcgss_svc_wrap_failed(rqstp);
        return -EINVAL;
bad_wrap:
        trace_rpcgss_svc_wrap(rqstp, maj_stat);
        return -ENOMEM;
}
/**
 * svcauth_gss_release - Wrap payload and release resources
 * @rqstp: RPC transaction context
 *
 * Return values:
 *    %0: the Reply is ready to be sent
 *    %-ENOMEM: failed to allocate memory
 *    %-EINVAL: encoding error
 */
static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
        struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
        struct gss_svc_data *gsd = rqstp->rq_auth_data;
        struct rpc_gss_wire_cred *gc;
        int stat;

        if (!gsd)
                goto out;
        gc = &gsd->clcred;
        if (gc->gc_proc != RPC_GSS_PROC_DATA)
                goto out;

        switch (gc->gc_svc) {
        case RPC_GSS_SVC_NONE:
                break;
        case RPC_GSS_SVC_INTEGRITY:
                stat = svcauth_gss_wrap_integ(rqstp);
                if (stat)
                        goto out_err;
                break;
        case RPC_GSS_SVC_PRIVACY:
                stat = svcauth_gss_wrap_priv(rqstp);
                if (stat)
                        goto out_err;
                break;
        /*
         * For any other gc_svc value, svcauth_gss_accept() already set
         * the auth_error appropriately; just fall through:
         */
        }

out:
        stat = 0;
out_err:
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_gssclient)
                auth_domain_put(rqstp->rq_gssclient);
        rqstp->rq_gssclient = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;
        if (gsd && gsd->rsci) {
                cache_put(&gsd->rsci->h, sn->rsc_cache);
                gsd->rsci = NULL;
        }
        return stat;
}
static void
svcauth_gss_domain_release_rcu(struct rcu_head *head)
{
        struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
        struct gss_domain *gd = container_of(dom, struct gss_domain, h);

        kfree(dom->name);
        kfree(gd);
}

static void
svcauth_gss_domain_release(struct auth_domain *dom)
{
        call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
}
static rpc_authflavor_t svcauth_gss_pseudoflavor(struct svc_rqst *rqstp)
{
        return svcauth_gss_flavor(rqstp->rq_gssclient);
}
static struct auth_ops svcauthops_gss = {
        .name           = "rpcsec_gss",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_GSS,
        .accept         = svcauth_gss_accept,
        .release        = svcauth_gss_release,
        .domain_release = svcauth_gss_domain_release,
        .set_client     = svcauth_gss_set_client,
        .pseudoflavor   = svcauth_gss_pseudoflavor,
};
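
/*
 * Per-network-namespace setup: each net gets its own "auth.rpcsec.init" and
 * "auth.rpcsec.context" caches, plus the proc entries created by
 * gss_svc_init_net() below.
 */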
static int rsi_cache_create_net(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd;
        int err;

        cd = cache_create_net(&rsi_cache_template, net);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        err = cache_register_net(cd, net);
        if (err) {
                cache_destroy_net(cd, net);
                return err;
        }
        sn->rsi_cache = cd;
        return 0;
}

static void rsi_cache_destroy_net(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd = sn->rsi_cache;

        sn->rsi_cache = NULL;
        cache_unregister_net(cd, net);
        cache_destroy_net(cd, net);
}
static int rsc_cache_create_net(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd;
        int err;

        cd = cache_create_net(&rsc_cache_template, net);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        err = cache_register_net(cd, net);
        if (err) {
                cache_destroy_net(cd, net);
                return err;
        }
        sn->rsc_cache = cd;
        return 0;
}

static void rsc_cache_destroy_net(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd = sn->rsc_cache;

        sn->rsc_cache = NULL;
        cache_unregister_net(cd, net);
        cache_destroy_net(cd, net);
}
int
gss_svc_init_net(struct net *net)
{
        int rv;

        rv = rsc_cache_create_net(net);
        if (rv)
                return rv;
        rv = rsi_cache_create_net(net);
        if (rv)
                goto out1;
        rv = create_use_gss_proxy_proc_entry(net);
        if (rv)
                goto out2;
        rv = create_krb5_enctypes_proc_entry(net);
        if (rv)
                goto out3;
        return 0;

out3:
        destroy_use_gss_proxy_proc_entry(net);
out2:
        rsi_cache_destroy_net(net);
out1:
        rsc_cache_destroy_net(net);
        return rv;
}
void
gss_svc_shutdown_net(struct net *net)
{
        destroy_krb5_enctypes_proc_entry(net);
        destroy_use_gss_proxy_proc_entry(net);
        rsi_cache_destroy_net(net);
        rsc_cache_destroy_net(net);
}
int
gss_svc_init(void)
{
        return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
}

void
gss_svc_shutdown(void)
{
        svc_auth_unregister(RPC_AUTH_GSS);
}