/*
 * Copyright (c) 1997 - 2008 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "krb5_locl.h"
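
/*
 * Initialise the encrypt and decrypt EVP cipher contexts for a key.
 * The contexts live in kd->schedule and are released by
 * _krb5_evp_cleanup() below.
 */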
void
_krb5_evp_schedule(krb5_context context,
                   struct _krb5_key_type *kt,
                   struct _krb5_key_data *kd)
{
    struct _krb5_evp_schedule *key = kd->schedule->data;
    const EVP_CIPHER *c = (*kt->evp)();

    EVP_CIPHER_CTX_init(&key->ectx);
    EVP_CIPHER_CTX_init(&key->dctx);

    EVP_CipherInit_ex(&key->ectx, c, NULL, kd->key->keyvalue.data, NULL, 1);
    EVP_CipherInit_ex(&key->dctx, c, NULL, kd->key->keyvalue.data, NULL, 0);
}
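
/*
 * Tear down the cipher contexts set up by _krb5_evp_schedule().
 */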
void
_krb5_evp_cleanup(krb5_context context, struct _krb5_key_data *kd)
{
    struct _krb5_evp_schedule *key = kd->schedule->data;

    EVP_CIPHER_CTX_cleanup(&key->ectx);
    EVP_CIPHER_CTX_cleanup(&key->dctx);
}
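
/*
 * Digest the signable regions of an iovec array with the given message
 * digest, reusing the digest context cached on the crypto handle when
 * there is one.  Returns 1 on success and 0 on failure, following the
 * EVP return convention.
 */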
int
_krb5_evp_digest_iov(krb5_crypto crypto,
                     const struct krb5_crypto_iov *iov,
                     int niov,
                     void *hash,
                     unsigned int *hsize,
                     const EVP_MD *md,
                     ENGINE *engine)
{
    EVP_MD_CTX *ctx;
    int ret, i;
    krb5_data current = {0, 0};

    if (crypto != NULL) {
        if (crypto->mdctx == NULL)
            crypto->mdctx = EVP_MD_CTX_create();
        if (crypto->mdctx == NULL)
            return 0;
        ctx = crypto->mdctx;
    } else
        ctx = EVP_MD_CTX_create();

    ret = EVP_DigestInit_ex(ctx, md, engine);
    if (ret != 1)
        goto out;

    /* Minimize EVP calls by coalescing contiguous iovec elements */
    for (i = 0; i < niov; i++) {
        if (_krb5_crypto_iov_should_sign(&iov[i])) {
            if (current.length &&
                (char *)current.data + current.length == iov[i].data.data) {
                current.length += iov[i].data.length;
            } else {
                if (current.length) {
                    ret = EVP_DigestUpdate(ctx, current.data, current.length);
                    if (ret != 1)
                        goto out;
                }
                current = iov[i].data;
            }
        }
    }

    if (current.length) {
        ret = EVP_DigestUpdate(ctx, current.data, current.length);
        if (ret != 1)
            goto out;
    }

    ret = EVP_DigestFinal_ex(ctx, hash, hsize);

 out:
    if (crypto == NULL)
        EVP_MD_CTX_destroy(ctx);

    return ret;
}
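
/*
 * HMAC the signable regions of an iovec array, coalescing adjacent
 * buffers so that contiguous data goes to OpenSSL in a single
 * HMAC_Update() call.
 */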
krb5_error_code
_krb5_evp_hmac_iov(krb5_context context,
                   krb5_crypto crypto,
                   struct _krb5_key_data *key,
                   const struct krb5_crypto_iov *iov,
                   int niov,
                   void *hmac,
                   unsigned int *hmaclen,
                   const EVP_MD *md,
                   ENGINE *engine)
{
    HMAC_CTX *ctx;
    krb5_data current = {0, NULL};
    int i;

    if (crypto != NULL) {
        if (crypto->hmacctx == NULL)
            crypto->hmacctx = HMAC_CTX_new();
        ctx = crypto->hmacctx;
    } else {
        ctx = HMAC_CTX_new();
    }
    if (ctx == NULL)
        return krb5_enomem(context);

    if (HMAC_Init_ex(ctx, key->key->keyvalue.data, key->key->keyvalue.length,
                     md, engine) == 0) {
        if (crypto == NULL)
            HMAC_CTX_free(ctx);
        return krb5_enomem(context);
    }

    /* Minimize HMAC calls by coalescing contiguous iovec elements */
    for (i = 0; i < niov; i++) {
        if (_krb5_crypto_iov_should_sign(&iov[i])) {
            if (current.length &&
                (char *)current.data + current.length == iov[i].data.data) {
                current.length += iov[i].data.length;
            } else {
                if (current.length)
                    HMAC_Update(ctx, current.data, current.length);
                current = iov[i].data;
            }
        }
    }

    if (current.length)
        HMAC_Update(ctx, current.data, current.length);

    HMAC_Final(ctx, hmac, hmaclen);

    if (crypto == NULL)
        HMAC_CTX_free(ctx);

    return 0;
}
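
/*
 * Plain CBC encrypt or decrypt of a contiguous buffer, in place.  A
 * NULL ivec means an all-zero IV.
 */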
krb5_error_code
_krb5_evp_encrypt(krb5_context context,
                  struct _krb5_key_data *key,
                  void *data,
                  size_t len,
                  krb5_boolean encryptp,
                  int usage,
                  void *ivec)
{
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    EVP_CIPHER_CTX *c;

    c = encryptp ? &ctx->ectx : &ctx->dctx;
    if (ivec == NULL) {
        size_t len2 = EVP_CIPHER_CTX_iv_length(c);
        void *loiv = malloc(len2);
        if (loiv == NULL)
            return krb5_enomem(context);
        memset(loiv, 0, len2);
        EVP_CipherInit_ex(c, NULL, NULL, NULL, loiv, -1);
        free(loiv);
    } else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    EVP_Cipher(c, data, data, len);
    return 0;
}
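
/*
 * Cursor for walking the encryptable regions of an iovec array:
 * 'current' is the unprocessed remainder of the present region and
 * 'nextidx' is the next element to examine.
 */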
struct _krb5_evp_iov_cursor {
    struct krb5_crypto_iov *iov;
    int niov;
    krb5_data current;
    int nextidx;
};

static const unsigned char zero_ivec[EVP_MAX_BLOCK_LENGTH] = { 0 };

static inline int
_krb5_evp_iov_should_encrypt(struct krb5_crypto_iov *iov)
{
    return (iov->flags == KRB5_CRYPTO_TYPE_DATA
            || iov->flags == KRB5_CRYPTO_TYPE_HEADER
            || iov->flags == KRB5_CRYPTO_TYPE_PADDING);
}

/*
 * If we have a group of iovecs which have been split up from
 * a single common buffer, expand the 'current' iovec out to
 * be as large as possible.
 */
static inline void
_krb5_evp_iov_cursor_expand(struct _krb5_evp_iov_cursor *cursor)
{
    if (cursor->nextidx == cursor->niov)
        return;

    while (cursor->nextidx < cursor->niov &&
           _krb5_evp_iov_should_encrypt(&cursor->iov[cursor->nextidx])) {
        if (cursor->iov[cursor->nextidx].data.length != 0 &&
            ((char *)cursor->current.data + cursor->current.length
             != cursor->iov[cursor->nextidx].data.data))
            return;

        cursor->current.length += cursor->iov[cursor->nextidx].data.length;
        cursor->nextidx++;
    }
}

/* Move the cursor along to the start of the next block to be
 * encrypted */
static inline void
_krb5_evp_iov_cursor_nextcrypt(struct _krb5_evp_iov_cursor *cursor)
{
    for (; cursor->nextidx < cursor->niov; cursor->nextidx++) {
        if (_krb5_evp_iov_should_encrypt(&cursor->iov[cursor->nextidx])
            && cursor->iov[cursor->nextidx].data.length != 0) {
            cursor->current = cursor->iov[cursor->nextidx].data;
            cursor->nextidx++;
            _krb5_evp_iov_cursor_expand(cursor);
            return;
        }
    }

    cursor->current.length = 0; /* No matches, so we're done here */
}

static inline void
_krb5_evp_iov_cursor_init(struct _krb5_evp_iov_cursor *cursor,
                          struct krb5_crypto_iov *iov, int niov)
{
    memset(cursor, 0, sizeof(struct _krb5_evp_iov_cursor));

    cursor->iov = iov;
    cursor->niov = niov;

    /* Move along to the first block we're going to be encrypting */
    _krb5_evp_iov_cursor_nextcrypt(cursor);
}
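
/* Advance the cursor 'amount' bytes through the encryptable data */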
static inline void
_krb5_evp_iov_cursor_advance(struct _krb5_evp_iov_cursor *cursor,
                             size_t amount)
{
    while (amount > 0) {
        if (cursor->current.length > amount) {
            cursor->current.data = (char *)cursor->current.data + amount;
            cursor->current.length -= amount;
            return;
        }
        amount -= cursor->current.length;
        _krb5_evp_iov_cursor_nextcrypt(cursor);
    }
}
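
/* True once every encryptable byte has been consumed */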
static inline int
_krb5_evp_iov_cursor_done(struct _krb5_evp_iov_cursor *cursor)
{
    return (cursor->nextidx == cursor->niov && cursor->current.length == 0);
}

/* Fill a memory buffer with data from one or more iovecs. Doesn't
 * advance the passed in cursor - use outcursor for the position
 * at the end of the filled region */
static inline void
_krb5_evp_iov_cursor_fillbuf(struct _krb5_evp_iov_cursor *cursor,
                             unsigned char *buf, size_t length,
                             struct _krb5_evp_iov_cursor *outcursor)
{
    struct _krb5_evp_iov_cursor cursorint;

    cursorint = *cursor;

    while (length > 0 && !_krb5_evp_iov_cursor_done(&cursorint)) {
        if (cursorint.current.length > length) {
            memcpy(buf, cursorint.current.data, length);
            _krb5_evp_iov_cursor_advance(&cursorint, length);
            length = 0;
        } else {
            memcpy(buf, cursorint.current.data, cursorint.current.length);
            length -= cursorint.current.length;
            buf += cursorint.current.length;
            _krb5_evp_iov_cursor_nextcrypt(&cursorint);
        }
    }

    if (outcursor != NULL)
        *outcursor = cursorint;
}

/* Fill an iovec from a memory buffer. Always advances the cursor to
 * the end of the filled region */
static inline void
_krb5_evp_iov_cursor_fillvec(struct _krb5_evp_iov_cursor *cursor,
                             unsigned char *buf, size_t length)
{
    while (length > 0 && !_krb5_evp_iov_cursor_done(cursor)) {
        if (cursor->current.length > length) {
            memcpy(cursor->current.data, buf, length);
            _krb5_evp_iov_cursor_advance(cursor, length);
            length = 0;
        } else {
            memcpy(cursor->current.data, buf, cursor->current.length);
            length -= cursor->current.length;
            buf += cursor->current.length;
            _krb5_evp_iov_cursor_nextcrypt(cursor);
        }
    }
}

static size_t
_krb5_evp_iov_cryptlength(struct krb5_crypto_iov *iov, int niov)
{
    int i;
    size_t length = 0;

    for (i = 0; i < niov; i++) {
        if (_krb5_evp_iov_should_encrypt(&iov[i]))
            length += iov[i].data.length;
    }

    return length;
}
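
/*
 * CBC encrypt or decrypt the encryptable regions of an iovec array in
 * place.  Whole blocks are ciphered directly within each iovec; a
 * partial tail is gathered from the following iovecs into a bounce
 * buffer, ciphered, and scattered back.
 */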
krb5_error_code
_krb5_evp_encrypt_iov(krb5_context context,
                      struct _krb5_key_data *key,
                      struct krb5_crypto_iov *iov,
                      int niov,
                      krb5_boolean encryptp,
                      int usage,
                      void *ivec)
{
    size_t blocksize, blockmask, wholeblocks;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    struct _krb5_evp_iov_cursor cursor;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);
    blockmask = ~(blocksize - 1);

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    _krb5_evp_iov_cursor_init(&cursor, iov, niov);

    while (!_krb5_evp_iov_cursor_done(&cursor)) {

        /* Number of bytes of data in this iovec that are in whole blocks */
        wholeblocks = cursor.current.length & blockmask;

        if (wholeblocks != 0) {
            EVP_Cipher(c, cursor.current.data,
                       cursor.current.data, wholeblocks);
            _krb5_evp_iov_cursor_advance(&cursor, wholeblocks);
        }

        /* If there's a partial block of data remaining in the current
         * iovec, steal enough from subsequent iovecs to form a whole block */
        if (cursor.current.length > 0 && cursor.current.length < blocksize) {
            /* Build up a block's worth of data in tmp, leaving the cursor
             * pointing at where we started */
            _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, blocksize, NULL);

            EVP_Cipher(c, tmp, tmp, blocksize);

            /* Copy the data in tmp back into the iovecs that it came from,
             * advancing the cursor */
            _krb5_evp_iov_cursor_fillvec(&cursor, tmp, blocksize);
        }
    }

    return 0;
}
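
/*
 * CBC with ciphertext stealing (CTS) over an iovec array, in place.
 * Everything up to the last two blocks is processed as plain CBC; the
 * final full and partial blocks are then swapped in the usual CTS
 * fashion so that no padding is required.
 */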
krb5_error_code
_krb5_evp_encrypt_iov_cts(krb5_context context,
                          struct _krb5_key_data *key,
                          struct krb5_crypto_iov *iov,
                          int niov,
                          krb5_boolean encryptp,
                          int usage,
                          void *ivec)
{
    size_t blocksize, blockmask, wholeblocks, length;
    size_t remaining, partiallen;
    struct _krb5_evp_iov_cursor cursor, lastpos;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH], tmp2[EVP_MAX_BLOCK_LENGTH];
    unsigned char tmp3[EVP_MAX_BLOCK_LENGTH], ivec2[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    size_t i;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);
    blockmask = ~(blocksize - 1);

    length = _krb5_evp_iov_cryptlength(iov, niov);

    if (length < blocksize) {
        krb5_set_error_message(context, EINVAL,
                               "message block too short");
        return EINVAL;
    }

    if (length == blocksize)
        return _krb5_evp_encrypt_iov(context, key, iov, niov,
                                     encryptp, usage, ivec);

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    if (encryptp) {
        /* On our first pass, we want to process everything but the
         * final partial block */
        remaining = ((length - 1) & blockmask);
        partiallen = length - remaining;

        memset(&lastpos, 0, sizeof(lastpos)); /* Keep the compiler happy */
    } else {
        /* Decryption needs to leave 2 whole blocks and a partial for
         * further processing */
        if (length > 2 * blocksize) {
            remaining = (((length - 1) / blocksize) * blocksize) - (blocksize * 2);
            partiallen = length - remaining - (blocksize * 2);
        } else {
            remaining = 0;
            partiallen = length - blocksize;
        }
    }

    _krb5_evp_iov_cursor_init(&cursor, iov, niov);
    while (remaining > 0) {
        /* If the iovec has more data than we need, just use it */
        if (cursor.current.length >= remaining) {
            EVP_Cipher(c, cursor.current.data, cursor.current.data, remaining);

            if (encryptp) {
                /* We've just encrypted the last block of data. Make a copy
                 * of it (and its location) for the CTS dance, below */
                lastpos = cursor;
                _krb5_evp_iov_cursor_advance(&lastpos, remaining - blocksize);
                memcpy(ivec2, lastpos.current.data, blocksize);
            }

            _krb5_evp_iov_cursor_advance(&cursor, remaining);
            remaining = 0;
        } else {
            /* Use as much as we can, firstly all of the whole blocks */
            wholeblocks = cursor.current.length & blockmask;

            if (wholeblocks > 0) {
                EVP_Cipher(c, cursor.current.data, cursor.current.data,
                           wholeblocks);
                _krb5_evp_iov_cursor_advance(&cursor, wholeblocks);
                remaining -= wholeblocks;
            }

            /* Then, if we have partial data left, steal enough from subsequent
             * iovecs to make a whole block */
            if (cursor.current.length > 0 && cursor.current.length < blocksize) {
                if (encryptp && remaining == blocksize)
                    lastpos = cursor;

                _krb5_evp_iov_cursor_fillbuf(&cursor, ivec2, blocksize, NULL);
                EVP_Cipher(c, ivec2, ivec2, blocksize);
                _krb5_evp_iov_cursor_fillvec(&cursor, ivec2, blocksize);

                remaining -= blocksize;
            }
        }
    }

    if (encryptp) {
        /* Copy the partial block into tmp */
        _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, partiallen, NULL);

        /* XOR the final partial block with ivec2 */
        for (i = 0; i < partiallen; i++)
            tmp[i] = tmp[i] ^ ivec2[i];
        for (; i < blocksize; i++)
            tmp[i] = 0 ^ ivec2[i]; /* XOR 0s if partial block exhausted */

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, tmp, tmp, blocksize);

        _krb5_evp_iov_cursor_fillvec(&lastpos, tmp, blocksize);
        _krb5_evp_iov_cursor_fillvec(&cursor, ivec2, partiallen);

        if (ivec)
            memcpy(ivec, tmp, blocksize);
    } else {
        /* Make a copy of the 2nd last full ciphertext block in ivec2 before
         * decrypting it. If no such block exists, use ivec or zero_ivec */
        if (length <= blocksize * 2) {
            if (ivec)
                memcpy(ivec2, ivec, blocksize);
            else
                memcpy(ivec2, zero_ivec, blocksize);
        } else {
            _krb5_evp_iov_cursor_fillbuf(&cursor, ivec2, blocksize, NULL);
            EVP_Cipher(c, tmp, ivec2, blocksize);
            _krb5_evp_iov_cursor_fillvec(&cursor, tmp, blocksize);
        }

        lastpos = cursor; /* Remember where the last block is */
        _krb5_evp_iov_cursor_fillbuf(&cursor, tmp, blocksize, &cursor);
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, tmp2, tmp, blocksize); /* tmp eventually becomes output ivec */

        _krb5_evp_iov_cursor_fillbuf(&cursor, tmp3, partiallen, NULL);
        memcpy(tmp3 + partiallen, tmp2 + partiallen,
               blocksize - partiallen); /* xor 0 */
        for (i = 0; i < partiallen; i++)
            tmp2[i] = tmp2[i] ^ tmp3[i];

        _krb5_evp_iov_cursor_fillvec(&cursor, tmp2, partiallen);

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, tmp3, tmp3, blocksize);

        for (i = 0; i < blocksize; i++)
            tmp3[i] = tmp3[i] ^ ivec2[i];

        _krb5_evp_iov_cursor_fillvec(&lastpos, tmp3, blocksize);

        if (ivec)
            memcpy(ivec, tmp, blocksize);
    }

    return 0;
}
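
/*
 * CBC with ciphertext stealing over a single contiguous buffer; the
 * flat-buffer counterpart of _krb5_evp_encrypt_iov_cts().
 */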
krb5_error_code
_krb5_evp_encrypt_cts(krb5_context context,
                      struct _krb5_key_data *key,
                      void *data,
                      size_t len,
                      krb5_boolean encryptp,
                      int usage,
                      void *ivec)
{
    size_t i, blocksize;
    struct _krb5_evp_schedule *ctx = key->schedule->data;
    unsigned char tmp[EVP_MAX_BLOCK_LENGTH], ivec2[EVP_MAX_BLOCK_LENGTH];
    EVP_CIPHER_CTX *c;
    unsigned char *p;

    c = encryptp ? &ctx->ectx : &ctx->dctx;

    blocksize = EVP_CIPHER_CTX_block_size(c);

    if (len < blocksize) {
        krb5_set_error_message(context, EINVAL,
                               "message block too short");
        return EINVAL;
    } else if (len == blocksize) {
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, data, data, len);
        return 0;
    }

    if (ivec)
        EVP_CipherInit_ex(c, NULL, NULL, NULL, ivec, -1);
    else
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);

    if (encryptp) {

        p = data;
        i = ((len - 1) / blocksize) * blocksize;
        EVP_Cipher(c, p, p, i);
        p += i - blocksize;
        len -= i;
        memcpy(ivec2, p, blocksize);

        for (i = 0; i < len; i++)
            tmp[i] = p[i + blocksize] ^ ivec2[i];
        for (; i < blocksize; i++)
            tmp[i] = 0 ^ ivec2[i];

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, p, tmp, blocksize);

        memcpy(p + blocksize, ivec2, len);
        if (ivec)
            memcpy(ivec, p, blocksize);
    } else {
        unsigned char tmp2[EVP_MAX_BLOCK_LENGTH], tmp3[EVP_MAX_BLOCK_LENGTH];

        p = data;
        if (len > blocksize * 2) {
            /* remove last two blocks and round up, decrypt this with
             * cbc, then do the cts dance */
            i = ((((len - blocksize * 2) + blocksize - 1) / blocksize) * blocksize);
            memcpy(ivec2, p + i - blocksize, blocksize);
            EVP_Cipher(c, p, p, i);
            p += i;
            len -= i + blocksize;
        } else {
            if (ivec)
                memcpy(ivec2, ivec, blocksize);
            else
                memcpy(ivec2, zero_ivec, blocksize);
            len -= blocksize;
        }

        memcpy(tmp, p, blocksize);
        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, tmp2, p, blocksize);

        memcpy(tmp3, p + blocksize, len);
        memcpy(tmp3 + len, tmp2 + len, blocksize - len); /* xor 0 */

        for (i = 0; i < len; i++)
            p[i + blocksize] = tmp2[i] ^ tmp3[i];

        EVP_CipherInit_ex(c, NULL, NULL, NULL, zero_ivec, -1);
        EVP_Cipher(c, p, tmp3, blocksize);

        for (i = 0; i < blocksize; i++)
            p[i] ^= ivec2[i];
        if (ivec)
            memcpy(ivec, tmp, blocksize);
    }
    return 0;
}