/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif
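
/*
 * Editorial note, based on the code below: krb5_encrypt() copies @length
 * bytes from @in to @out and encrypts them in place through a single
 * scatterlist segment.  A caller-supplied @iv is copied into a zeroed
 * GSS_KRB5_MAX_BLOCKSIZE buffer, so a short IV is implicitly zero-padded.
 * @length must be a multiple of the cipher blocksize.
 */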
u32
krb5_encrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}
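
/*
 * krb5_decrypt() is the inverse of krb5_encrypt(): it copies @length
 * bytes from @in to @out and decrypts them in place under the same
 * IV-padding and blocksize-alignment rules.
 */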
u32
krb5_decrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}
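
/*
 * checksummer() is the xdr_process_buf() callback used by the
 * make_checksum*() helpers below: each scatterlist fragment of the
 * xdr_buf is folded into the running ahash carried in @data.
 */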
static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}
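
/*
 * Map a GSS-API key usage number onto the four-octet, little-endian
 * salt that the Microsoft RC4-HMAC variant (see RFC 4757) mixes into
 * its MD5 input; unsupported usage values are rejected.
 */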
static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}
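
/*
 * make_checksum_hmac_md5() implements the two-stage RC4-HMAC checksum:
 * an unkeyed MD5 over salt, token header and message body, followed by
 * an HMAC-MD5 over that digest using the supplied checksum key.  The
 * result is truncated to gk5e->cksumlength.
 */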
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	u8 *rc4salt;
	struct crypto_ahash *md5;
	struct crypto_ahash *hmac_md5;
	struct ahash_request *req;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
	if (!rc4salt)
		return GSS_S_FAILURE;

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		goto out_free_rc4salt;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		goto out_free_rc4salt;

	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		goto out_free_cksum;

	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5))
		goto out_free_md5;

	req = ahash_request_alloc(md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	ahash_request_set_crypt(req, sg, NULL, 4);
	err = crypto_ahash_update(req);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	ahash_request_free(req);
	req = ahash_request_alloc(hmac_md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
	ahash_request_set_crypt(req, sg, checksumdata,
				crypto_ahash_digestsize(md5));
	err = crypto_ahash_digest(req);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_hmac_md5:
	crypto_free_ahash(hmac_md5);
out_free_md5:
	crypto_free_ahash(md5);
out_free_cksum:
	kfree(checksumdata);
out_free_rc4salt:
	kfree(rc4salt);
	return err ? GSS_S_FAILURE : 0;
}
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body first, then over the first 16 octets of the MIC token.
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out;
	}
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
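
/*
 * State shared across encryptor() callbacks while walking an xdr_buf:
 * the chaining IV, the in-flight request, the current position in the
 * buffer, and the scatterlist fragments accumulated so far.
 */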
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};
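
/*
 * encryptor() gathers xdr_buf fragments until it has a whole number of
 * cipher blocks, encrypts that run, and carries any sub-block remainder
 * over into the next call.  Plaintext may be read from desc->pages
 * while the ciphertext lands in the xdr_buf's own pages.
 */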
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
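
/*
 * Encrypt @buf in place from @offset to the end.  The caller must
 * ensure (buf->len - offset) is blocksize-aligned; @pages supplies the
 * plaintext source pages swapped in by the caller.
 */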
int
gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}
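
/*
 * The decrypt-side counterpart of encryptor_desc: decryption is done
 * strictly in place, so a single fragment array serves as both source
 * and destination.
 */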
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};
static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
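
/*
 * Decrypt @buf in place from @offset to the end; the same
 * blocksize-alignment rule as for gss_encrypt_xdr_buf() applies.
 */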
int
gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}
/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */
static u32
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
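
/*
 * Run the tail of a wrap/unwrap through @cipher.  The remaining bytes
 * (at most two cipher blocks) are copied out of the xdr_buf into a
 * small linear buffer, encrypted or decrypted there, and written back.
 * The caller passes in the IV left over from the CBC bulk pass so the
 * ciphertext-stealing tail chains correctly; for encryption the
 * plaintext is read from @pages rather than from buf->pages.
 */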
static u32
gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(0);
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_sync_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	kfree(data);
	return ret;
}
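
/*
 * RFC 4121 wrap for the AES enctypes: insert a confounder after the
 * token header, mirror the header into the tail, HMAC the plaintext
 * (using the real page-cache pages), then encrypt the bulk with the
 * auxiliary CBC cipher and finish the last one or two blocks with
 * ciphertext stealing so the IV chains across both passes.
 */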
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_sync_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err)
		goto out_err;

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}
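
/*
 * The matching unwrap: carve the ciphertext out as a sub-buffer,
 * CBC-decrypt the bulk, CTS-decrypt the tail, then recompute the HMAC
 * over the recovered plaintext and compare it (via crypto_memneq(),
 * i.e. in constant time) against the value carried in the packet.
 */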
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_sync_skcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}
/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
		       struct crypto_sync_skcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;

	/* Compute intermediate Kseq from session key */
	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, cksum, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_sync_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
		       struct crypto_sync_skcipher *cipher,
		       s32 seqnum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_sync_skcipher_setkey(cipher, Kcrypt,
					  kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}