/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif
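
/*
 * The v1 (rfc 1964) wrap path pads the plaintext out to the cipher
 * blocksize before encrypting.  Every pad byte holds the pad length,
 * and an already-aligned message gets a full block of padding, so
 * gss_krb5_padding() never returns 0.  For example, with blocksize 8
 * and length 20 the helpers below append four bytes of 0x04; with
 * length 24 they append eight bytes of 0x08.
 */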
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
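
/*
 * On unwrap the last pad byte gives the pad length.  It may sit in the
 * head kvec, in one of the pages, or in the tail, so the helper below
 * walks the xdr_buf segments to find it before shrinking buf->len.
 */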
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	buf->len -= pad;
	return 0;
}
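
/*
 * conflen comes from the per-enctype table (kctx->gk5e->conflen);
 * presumably one cipher block: 8 octets for the DES/DES3/RC4 enctypes
 * and 16 for AES (which reaches this helper from the v2 encrypt path).
 * The exact values live in the enctype definitions, not in this file.
 */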
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = prandom_u32();
		i = (i << 32) | prandom_u32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */
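
/*
 * Layout of the v1 wrap token built below (rfc 1964, section 1.2.1),
 * following the DER framing that g_make_token_header() emits:
 *
 *	ptr[0..1]	TOK_ID (KG_TOK_WRAP_MSG)
 *	ptr[2..3]	SGN_ALG
 *	ptr[4..5]	SEAL_ALG
 *	ptr[6..7]	filler (0xff 0xff)
 *	ptr[8..15]	SND_SEQ (encrypted sequence number)
 *	ptr[16..]	SGN_CKSUM (cksumlength octets), then the encrypted
 *			confounder, plaintext, and padding
 */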
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = kctx->gk5e->conflen;

	dprintk("RPC:       %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes according
	 * to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
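
	/*
	 * krb5_seq_lock serializes allocation of seq_send so that
	 * concurrent wraps on the same context never reuse a sequence
	 * number.
	 */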
	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

		err = gss_encrypt_xdr_buf(cipher, buf,
					  offset + headlen - conflen, pages);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_FAILURE;
	} else {
		if (gss_encrypt_xdr_buf(kctx->enc, buf,
					offset + headlen - conflen, pages))
			return GSS_S_FAILURE;
	}

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
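
/*
 * Reverse of gss_wrap_kerberos_v1(): verify the token header and
 * algorithm fields, decrypt in place, check the checksum and the
 * sequence-number direction, then slide the plaintext down over the
 * header and strip the block padding.
 */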
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = kctx->gk5e->conflen;
	int			crypt_offset;
	u8			*cksumkey;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header.
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	/*
	 * Need plaintext seqnum to derive encryption key for arcfour-hmac
	 */
	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_DEFECTIVE_TOKEN;
	} else {
		if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
			return GSS_S_DEFECTIVE_TOKEN;
	}

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}
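
/*
 * Example: a 9-byte buffer "ABCDEFGHI" rotated a little with shift == 3
 * saves "ABC", slides "DEFGHI" to the front, and rewrites "ABC" at the
 * end, yielding "DEFGHIABC" -- a left rotation by three bytes.
 */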

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}
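
/*
 * rfc 4121 allows the sender to rotate the token body right by RRC
 * octets; gss_unwrap_kerberos_v2() undoes that by rotating everything
 * past the 16-byte token header left by rrc before decrypting.
 */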

static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	int		blocksize;
	u8		*ptr, *plainhdr;
	s32		now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;
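
	/*
	 * The 16-octet rfc 4121 (section 4.2.6.2) wrap token header laid
	 * down below:
	 *
	 *	octets 0..1	TOK_ID (KG2_TOK_WRAP)
	 *	octet  2	Flags
	 *	octet  3	Filler (0xff)
	 *	octets 4..5	EC (extra count)
	 *	octets 6..7	RRC (right rotation count)
	 *	octets 8..15	SND_SEQ (64-bit sequence number)
	 */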
	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
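
/*
 * rfc 4121 wrap also embeds a copy of the token header at the end of
 * the encrypted payload; unwrap below reads it back as decrypted_hdr
 * and compares it with the on-the-wire header (skipping the RRC bytes,
 * which legitimately differ) to detect tampering.
 */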
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32		now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */
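
	/* Undo the peer's right rotation of the token body; offset + 16
	 * skips the 16-byte token header, which is never rotated. */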
	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}

u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}