 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <security/cryptoki.h>
#include <sys/types.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>
#ifdef	_KERNEL
#include <sys/cpuvar.h>		/* cpu_t, CPU */
#include <sys/x86_archext.h>	/* x86_featureset, X86FSET_*, CPUID_* */
#include <sys/disp.h>		/* kpreempt_disable(), kpreempt_enable */
/* Workaround for no XMM kernel thread save/restore */
#define	KPREEMPT_DISABLE	kpreempt_disable()
#define	KPREEMPT_ENABLE		kpreempt_enable()
#else	/* User space */
#include <sys/auxv.h>		/* getisax() */
#include <sys/auxv_386.h>	/* AV_386_PCLMULQDQ bit */
#define	KPREEMPT_DISABLE
#define	KPREEMPT_ENABLE
#endif	/* _KERNEL */
extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
/*
 * Perform a carry-less multiplication (that is, use XOR instead of the
 * multiply operator) on *x_in and *y and place the result in *res.
 *
 * Byte swap the input (*x_in and *y) and the output (*res).
 *
 * Note: x_in, y, and res all point to 16-byte numbers (an array of two
 * 64-bit integers).
 */
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
	if (intel_pclmulqdq_instruction_present()) {
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else {
		static const uint64_t R = 0xe100000000000000ULL;
		struct aes_block z = {0, 0};

		for (j = 0; j < 2; j++) {
			for (i = 0; i < 64; i++, x <<= 1) {
				if (x & 0x8000000000000000ULL) {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = (v.a >> 1) ^ R;

					v.b = (v.a << 63)|(v.b >> 1);
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
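
/*
 * Illustrative sketch (not part of the original file): a carry-less
 * multiplication replaces the additions of a schoolbook multiply with
 * XOR, so no carries propagate between bit positions. The hypothetical
 * 8-bit helper below shows the idea in miniature; gcm_mul() above
 * applies the same shift-and-XOR loop to 128-bit operands and reduces
 * the result modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1
 * (the constant R).
 */
static uint16_t
example_clmul_8x8(uint8_t a, uint8_t b)
{
	uint16_t prod = 0;
	int i;

	/* For every set bit of a, XOR in a shifted copy of b. */
	for (i = 0; i < 8; i++) {
		if (a & (1u << i))
			prod ^= (uint16_t)b << i;
	}
	return (prod);
}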
#define	GHASH(c, d, t) \
	xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
	gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
	(uint64_t *)(void *)(t));
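
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the GHASH macro above performs one step of the chained hash
 * Y_i = (Y_{i-1} XOR X_i) * H over GF(2^128), accumulated in gcm_ghash.
 * Hashing a run of 16-byte blocks therefore amounts to the loop below,
 * where xor_block16 stands in for the xor_block callback passed around
 * elsewhere in this file.
 */
static void
example_ghash_blocks(gcm_ctx_t *ctx, uint8_t *blocks, size_t nblocks,
    void (*xor_block16)(uint8_t *, uint8_t *))
{
	size_t i;

	for (i = 0; i < nblocks; i++) {
		/* Y = (Y XOR X_i) * H, kept in ctx->gcm_ghash. */
		xor_block16(&blocks[i * 16], (uint8_t *)ctx->gcm_ghash);
		gcm_mul((uint64_t *)(void *)ctx->gcm_ghash, ctx->gcm_H,
		    (uint64_t *)(void *)ctx->gcm_ghash);
	}
}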
/*
 * Encrypt multiple blocks of data in GCM mode. Decrypt for GCM mode
 * is done in another function.
 */
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
	size_t remainder = length;
	uint8_t *datap = (uint8_t *)data;
	size_t out_data_1_len;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	if (length + ctx->gcm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
		    length);
		ctx->gcm_remainder_len += length;
		ctx->gcm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}
	lastp = (uint8_t *)ctx->gcm_cb;

	crypto_init_ptrs(out, &iov_or_mp, &offset);
		/* Unprocessed data from last call. */
		if (ctx->gcm_remainder_len > 0) {
			need = block_size - ctx->gcm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
			    [ctx->gcm_remainder_len], need);

			blockp = (uint8_t *)ctx->gcm_remainder;
		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 32 bits of the counter block.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);
		xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

		lastp = (uint8_t *)ctx->gcm_tmp;

		ctx->gcm_processed_data_len += block_size;
		if (ctx->gcm_remainder_len > 0) {
			bcopy(blockp, ctx->gcm_copy_to,
			    ctx->gcm_remainder_len);
			bcopy(blockp + ctx->gcm_remainder_len, datap,
			    need);
		}
		crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
		    &out_data_1_len, &out_data_2, block_size);
		/* copy block to where it belongs */
		if (out_data_1_len == block_size) {
			copy_block(lastp, out_data_1);
		} else {
			bcopy(lastp, out_data_1, out_data_1_len);
			if (out_data_2 != NULL) {
				bcopy(lastp + out_data_1_len, out_data_2,
				    block_size - out_data_1_len);
			}
		}

		out->cd_offset += block_size;
		/* add ciphertext to the hash */
		GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);
		/* Update pointer to next block of data to be processed. */
		if (ctx->gcm_remainder_len != 0) {
			ctx->gcm_remainder_len = 0;
		remainder = (size_t)&data[length] - (size_t)datap;
		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			ctx->gcm_copy_to = datap;

		ctx->gcm_copy_to = NULL;
	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
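
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the counter updates repeated throughout this file keep only
 * the 32 low-order bits of the counter block (stored big-endian in
 * gcm_cb[1]) as the running block counter and leave the upper 96 bits,
 * which come from the IV, untouched.
 */
static void
example_gcm_incr_counter(uint64_t cb[2])
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint64_t counter;

	/* Extract, increment, and re-insert the low 32 counter bits. */
	counter = ntohll(cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	cb[1] = (cb[1] & ~counter_mask) | counter;
}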
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))

	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint8_t *ghash, *macp;
	if (out->cd_length <
	    (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}
	ghash = (uint8_t *)ctx->gcm_ghash;

	if (ctx->gcm_remainder_len > 0) {
		uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;
		/*
		 * Here is where we deal with data that is not a
		 * multiple of the block size.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);

		macp = (uint8_t *)ctx->gcm_remainder;
		bzero(macp + ctx->gcm_remainder_len,
		    block_size - ctx->gcm_remainder_len);

		/* XOR with counter block */
		for (i = 0; i < ctx->gcm_remainder_len; i++) {
			macp[i] ^= tmpp[i];
		}
		/* add ciphertext to the hash */
		GHASH(ctx, macp, ghash);

		ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
	ctx->gcm_len_a_len_c[1] =
	    htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);
	if (ctx->gcm_remainder_len > 0) {
		rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	out->cd_offset += ctx->gcm_remainder_len;
	ctx->gcm_remainder_len = 0;
	rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += ctx->gcm_tag_len;

	return (CRYPTO_SUCCESS);
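
/*
 * Illustrative note (not part of the original file): the tag emitted
 * above is T = MSB_taglen(GHASH(H, A, C) XOR E_K(J0)); the final GHASH
 * value over the AAD, the ciphertext, and the bit lengths in
 * gcm_len_a_len_c is XORed with the encrypted pre-counter block J0 and
 * truncated to gcm_tag_len bytes.
 */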
/*
 * This will only deal with decrypting the last block of the input that
 * might not be a multiple of the block length.
 */
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))

	uint8_t *datap, *outp, *counterp;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	/* Counter bits are confined to the bottom 32 bits */
	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
	datap = (uint8_t *)ctx->gcm_remainder;
	outp = &((ctx->gcm_pt_buf)[index]);
	counterp = (uint8_t *)ctx->gcm_tmp;
	/* authentication tag */
	bzero((uint8_t *)ctx->gcm_tmp, block_size);
	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

	/* add ciphertext to the hash */
	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);
	/* decrypt remaining ciphertext */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

	/* XOR with counter block */
	for (i = 0; i < ctx->gcm_remainder_len; i++) {
		outp[i] = datap[i] ^ counterp[i];
	}
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
	/*
	 * Copy contiguous ciphertext input blocks to plaintext buffer.
	 * Ciphertext will be decrypted in the final.
	 */
	new_len = ctx->gcm_pt_buf_len + length;
#ifdef _KERNEL
	new = kmem_alloc(new_len, ctx->gcm_kmflag);
	bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
	kmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
#else
	new = malloc(new_len);
	bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
	free(ctx->gcm_pt_buf);
#endif
	if (new == NULL)
		return (CRYPTO_HOST_MEMORY);
	ctx->gcm_pt_buf = new;
	ctx->gcm_pt_buf_len = new_len;
	bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
	    length);
	ctx->gcm_processed_data_len += length;
	ctx->gcm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
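
/*
 * Illustrative usage sketch (hypothetical caller and callback names,
 * not part of the original file): decryption only accumulates
 * ciphertext here because the last gcm_tag_len bytes of the input are
 * the authentication tag, which cannot be located until all input has
 * been seen. A caller would typically look something like:
 *
 *	(void) gcm_mode_decrypt_contiguous_blocks(ctx, buf, buflen, out,
 *	    block_size, aes_encrypt_block, aes_copy_block, aes_xor_block);
 *	rv = gcm_decrypt_final(ctx, out, block_size,
 *	    aes_encrypt_block, aes_xor_block);
 *
 * where the aes_* functions stand in for the AES provider's callbacks.
 */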
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))

	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int processed = 0, rv;
	ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);

	pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
	ghash = (uint8_t *)ctx->gcm_ghash;
	blockp = ctx->gcm_pt_buf;
	while (remainder > 0) {
		/* Incomplete last block */
		if (remainder < block_size) {
			bcopy(blockp, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			/*
			 * We are not expecting any more ciphertext; just
			 * compute the plaintext for the remaining input.
			 */
			gcm_decrypt_incomplete_block(ctx, block_size,
			    processed, encrypt_block, xor_block);
			ctx->gcm_remainder_len = 0;
		/* add ciphertext to the hash */
		GHASH(ctx, blockp, ghash);
		/* Counter bits are confined to the bottom 32 bits */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
		cbp = (uint8_t *)ctx->gcm_tmp;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

		/* XOR with ciphertext */
		xor_block(cbp, blockp);

		processed += block_size;
		blockp += block_size;
		remainder -= block_size;
	}
	ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);
	/* compare the input authentication tag with what we calculated */
	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	}
	rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += pt_len;

	return (CRYPTO_SUCCESS);
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)

	size_t tag_len;

	/*
	 * Check the length of the authentication tag (in bits).
	 */
	tag_len = gcm_param->ulTagBits;
	switch (tag_len) {
	case 32:
	case 64:
	case 96:
	case 104:
	case 112:
	case 120:
	case 128:
		break;
	default:
		return (CRYPTO_MECHANISM_PARAM_INVALID);
	}

	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))

	ulong_t remainder = iv_len;
	ulong_t processed = 0;
	uint8_t *datap, *ghash;
	uint64_t len_a_len_c[2];
	ghash = (uint8_t *)ctx->gcm_ghash;
	cb = (uint8_t *)ctx->gcm_cb;

	/* J0 will be used again in the final */
	copy_block(cb, (uint8_t *)ctx->gcm_J0);
		if (remainder < block_size) {
			bzero(cb, block_size);
			bcopy(&(iv[processed]), cb, remainder);
			datap = (uint8_t *)cb;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(iv[processed]));
			processed += block_size;
			remainder -= block_size;
		}
		GHASH(ctx, datap, ghash);
	} while (remainder > 0);
	len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
	GHASH(ctx, len_a_len_c, ctx->gcm_J0);

	/* J0 will be used again in the final */
	copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
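
/*
 * Illustrative note (not part of the original file): per the GCM
 * specification, a 12-byte (96-bit) IV forms J0 directly as
 * IV || 0x00000001 with no hashing, while any other IV length takes
 * the path above: J0 = GHASH(IV padded to a block boundary || 0^64 ||
 * [len(IV) in bits]_64), which is what the loop and the final GHASH
 * over len_a_len_c compute.
 */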
/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 */
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))

	uint8_t *ghash, *datap, *authp;
	size_t remainder, processed;
	/* encrypt zero block to get subkey H */
	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
	    (uint8_t *)ctx->gcm_H);

	gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
	    copy_block, xor_block);

	authp = (uint8_t *)ctx->gcm_tmp;
	ghash = (uint8_t *)ctx->gcm_ghash;
	bzero(authp, block_size);
	bzero(ghash, block_size);
	remainder = auth_data_len;

		if (remainder < block_size) {
			/*
			 * There's not a block full of data, pad rest of
			 * the buffer with zeros.
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		/* add auth data to the hash */
		GHASH(ctx, datap, ghash);

	} while (remainder > 0);
	return (CRYPTO_SUCCESS);
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))

	CK_AES_GCM_PARAMS *gcm_param;
	gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;

	if ((rv = gcm_validate_args(gcm_param)) != 0) {
		return (rv);
	}

	gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
	gcm_ctx->gcm_tag_len >>= 3;
	gcm_ctx->gcm_processed_data_len = 0;

	/* these values are in bits */
	gcm_ctx->gcm_len_a_len_c[0]
	    = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));
		gcm_ctx->gcm_flags |= GCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
	if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
	    gcm_param->pAAD, gcm_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
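
/*
 * Illustrative sketch (hypothetical caller and callback names, not part
 * of the original file): a caller fills in CK_AES_GCM_PARAMS before
 * invoking gcm_init_ctx(). The example_aes_* externs and the 16-byte
 * block size below are stand-ins for the AES provider's callbacks.
 */
extern int example_aes_encrypt_block(const void *, const uint8_t *, uint8_t *);
extern void example_aes_copy_block(uint8_t *, uint8_t *);
extern void example_aes_xor_block(uint8_t *, uint8_t *);

static int
example_gcm_setup(gcm_ctx_t *gcm_ctx, uint8_t *iv, uint8_t *aad,
    size_t aad_len)
{
	CK_AES_GCM_PARAMS params;

	params.pIv = iv;
	params.ulIvLen = 12;		/* 96-bit IV, the common case */
	params.pAAD = aad;
	params.ulAADLen = aad_len;
	params.ulTagBits = 128;		/* full-length tag */

	return (gcm_init_ctx(gcm_ctx, (char *)&params, 16,
	    example_aes_encrypt_block, example_aes_copy_block,
	    example_aes_xor_block));
}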
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))

	CK_AES_GMAC_PARAMS *gmac_param;
	gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;

	gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
	gcm_ctx->gcm_processed_data_len = 0;

	/* these values are in bits */
	gcm_ctx->gcm_len_a_len_c[0]
	    = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));
		gcm_ctx->gcm_flags |= GMAC_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
	if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
	    gmac_param->pAAD, gmac_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
gcm_alloc_ctx(int kmflag)

	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GCM_MODE;
gmac_alloc_ctx(int kmflag)

	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GMAC_MODE;
void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}
/*
 * Return 1 if executing on Intel with PCLMULQDQ instructions,
 * otherwise 0 (i.e., Intel without PCLMULQDQ or AMD64).
 * Cache the result, as the CPU can't change.
 *
 * Note: the userland version uses getisax(). The kernel version uses
 * is_x86_feature().
 */
intel_pclmulqdq_instruction_present(void)

	static int cached_result = -1;

	if (cached_result == -1) { /* first time */
#ifdef _KERNEL
		cached_result =
		    is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ);
#else
		uint_t ui = 0;

		(void) getisax(&ui, 1);
		cached_result = (ui & AV_386_PCLMULQDQ) != 0;
#endif	/* _KERNEL */
	}

	return (cached_result);
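
/*
 * Illustrative userland sketch (hypothetical helper, not part of the
 * original file): outside the kernel the same PCLMULQDQ check can be
 * made directly with getisax(), exactly as the non-kernel branch of
 * the function above does, and its result used to choose between the
 * accelerated and the portable multiply in gcm_mul().
 */
#ifndef _KERNEL
static int
example_have_pclmulqdq(void)
{
	uint_t ui = 0;

	/* Ask which instruction-set extensions are available for use. */
	(void) getisax(&ui, 1);
	return ((ui & AV_386_PCLMULQDQ) != 0);
}
#endif	/* !_KERNEL */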