usr/src/common/crypto/modes/gcm.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#ifndef _KERNEL
#include <strings.h>
#include <limits.h>
#include <assert.h>
#include <security/cryptoki.h>
#endif	/* _KERNEL */

#include <sys/types.h>
#include <sys/kmem.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>
#ifdef __amd64

#ifdef _KERNEL
#include <sys/cpuvar.h>		/* cpu_t, CPU */
#include <sys/x86_archext.h>	/* x86_featureset, X86FSET_*, CPUID_* */
#include <sys/disp.h>		/* kpreempt_disable(), kpreempt_enable */
/* Workaround for no XMM kernel thread save/restore */
#define	KPREEMPT_DISABLE	kpreempt_disable()
#define	KPREEMPT_ENABLE		kpreempt_enable()

#else
#include <sys/auxv.h>		/* getisax() */
#include <sys/auxv_386.h>	/* AV_386_PCLMULQDQ bit */
#define	KPREEMPT_DISABLE
#define	KPREEMPT_ENABLE
#endif	/* _KERNEL */

extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
#endif	/* __amd64 */
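/*
 * One 128-bit GCM block viewed as two 64-bit halves; gcm_mul() below byte
 * swaps these into host order while it works on them.
 */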
struct aes_block {
	uint64_t a;
	uint64_t b;
};
/*
 * gcm_mul()
 *	Perform a carry-less multiplication (that is, use XOR instead of the
 *	multiply operator) on *x_in and *y and place the result in *res.
 *
 * Byte swap the input (*x_in and *y) and the output (*res).
 *
 * Note: x_in, y, and res all point to 16-byte numbers (an array of two
 * 64-bit integers).
 */
void
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
{
#ifdef __amd64
	if (intel_pclmulqdq_instruction_present()) {
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else
#endif	/* __amd64 */
	{
		static const uint64_t R = 0xe100000000000000ULL;
		struct aes_block z = {0, 0};
		struct aes_block v;
		uint64_t x;
		int i, j;

		v.a = ntohll(y[0]);
		v.b = ntohll(y[1]);

		for (j = 0; j < 2; j++) {
			x = ntohll(x_in[j]);
			for (i = 0; i < 64; i++, x <<= 1) {
				if (x & 0x8000000000000000ULL) {
					z.a ^= v.a;
					z.b ^= v.b;
				}
				if (v.b & 1ULL) {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = (v.a >> 1) ^ R;
				} else {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = v.a >> 1;
				}
			}
		}
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
	}
}
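/*
 * The bit-by-bit fallback above is the schoolbook multiply from the GCM
 * specification (NIST SP 800-38D, Algorithm 1): for each set bit of x the
 * running value of v is XORed into z, and after every step v is multiplied
 * by the field element "x" via a one-bit right shift, folding any carry
 * back in with R (0xe1 in the top byte), the reduction constant for
 * x^128 + x^7 + x^2 + x + 1 in GCM's reflected bit ordering.
 */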
#define	GHASH(c, d, t) \
	xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
	gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
	(uint64_t *)(void *)(t));
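/*
 * One GHASH step: fold the next 16-byte block d into the running hash and
 * multiply by the subkey H, i.e. t = (ghash XOR d) * H in GF(2^128).  Most
 * callers pass the context's own gcm_ghash as t, so the hash is updated in
 * place.
 */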
/*
 * Encrypt multiple blocks of data in GCM mode.  Decrypt for GCM mode
 * is done in another function.
 */
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);

	if (length + ctx->gcm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
		    length);
		ctx->gcm_remainder_len += length;
		ctx->gcm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->gcm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);
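	/*
	 * Main loop: for each full block, bump the 32-bit counter held in
	 * the last four bytes of the counter block, encrypt the counter
	 * block to get a keystream block, XOR it with the plaintext, and
	 * feed the resulting ciphertext into the running GHASH.
	 */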
	do {
		/* Unprocessed data from last call. */
		if (ctx->gcm_remainder_len > 0) {
			need = block_size - ctx->gcm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
			    [ctx->gcm_remainder_len], need);

			blockp = (uint8_t *)ctx->gcm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 32 bits of the counter block.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);
		xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

		lastp = (uint8_t *)ctx->gcm_tmp;

		ctx->gcm_processed_data_len += block_size;
		if (out == NULL) {
			if (ctx->gcm_remainder_len > 0) {
				bcopy(blockp, ctx->gcm_copy_to,
				    ctx->gcm_remainder_len);
				bcopy(blockp + ctx->gcm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* add ciphertext to the hash */
		GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);
		/* Update pointer to next block of data to be processed. */
		if (ctx->gcm_remainder_len != 0) {
			datap += need;
			ctx->gcm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			ctx->gcm_copy_to = datap;
			goto out;
		}
		ctx->gcm_copy_to = NULL;

	} while (remainder > 0);
out:
	return (CRYPTO_SUCCESS);
}
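/*
 * Any trailing partial block is left in ctx->gcm_remainder; it is either
 * completed by the next call or encrypted and hashed by gcm_encrypt_final().
 */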
/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint8_t *ghash, *macp;
	int i, rv;

	if (out->cd_length <
	    (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	ghash = (uint8_t *)ctx->gcm_ghash;
	if (ctx->gcm_remainder_len > 0) {
		uint64_t counter;
		uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;

		/*
		 * Here is where we deal with data that is not a
		 * multiple of the block size.
		 */

		/*
		 * Increment counter.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);

		macp = (uint8_t *)ctx->gcm_remainder;
		bzero(macp + ctx->gcm_remainder_len,
		    block_size - ctx->gcm_remainder_len);

		/* XOR with counter block */
		for (i = 0; i < ctx->gcm_remainder_len; i++) {
			macp[i] ^= tmpp[i];
		}

		/* add ciphertext to the hash */
		GHASH(ctx, macp, ghash);

		ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
	}
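	/*
	 * Finish the tag as in SP 800-38D: hash in the bit lengths of the
	 * AAD and ciphertext, then XOR the result with E(K, J0).  The first
	 * gcm_tag_len bytes of gcm_ghash become the authentication tag.
	 */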
	ctx->gcm_len_a_len_c[1] =
	    htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	if (ctx->gcm_remainder_len > 0) {
		rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	out->cd_offset += ctx->gcm_remainder_len;
	ctx->gcm_remainder_len = 0;
	rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += ctx->gcm_tag_len;

	return (CRYPTO_SUCCESS);
}
/*
 * This will only deal with decrypting the last block of the input that
 * might not be a multiple of block length.
 */
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *datap, *outp, *counterp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int i;

	/*
	 * Increment counter.
	 * Counter bits are confined to the bottom 32 bits
	 */
	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

	datap = (uint8_t *)ctx->gcm_remainder;
	outp = &((ctx->gcm_pt_buf)[index]);
	counterp = (uint8_t *)ctx->gcm_tmp;

	/* authentication tag */
	bzero((uint8_t *)ctx->gcm_tmp, block_size);
	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

	/* add ciphertext to the hash */
	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

	/* decrypt remaining ciphertext */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

	/* XOR with counter block */
	for (i = 0; i < ctx->gcm_remainder_len; i++) {
		outp[i] = datap[i] ^ counterp[i];
	}
}
/* ARGSUSED */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t new_len;
	uint8_t *new;

	/*
	 * Copy contiguous ciphertext input blocks to plaintext buffer.
	 * Ciphertext will be decrypted in the final.
	 */
	if (length > 0) {
		new_len = ctx->gcm_pt_buf_len + length;
#ifdef _KERNEL
		new = kmem_alloc(new_len, ctx->gcm_kmflag);
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
		kmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
#else
		new = malloc(new_len);
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
		free(ctx->gcm_pt_buf);
#endif

		ctx->gcm_pt_buf = new;
		ctx->gcm_pt_buf_len = new_len;
		bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
		    length);
		ctx->gcm_processed_data_len += length;
	}

	ctx->gcm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}
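/*
 * Decryption is deferred: the input is only buffered above because the last
 * gcm_tag_len bytes of the stream are the authentication tag, and their
 * position is not known until gcm_decrypt_final() sees the total length.
 * The tag is then verified before any plaintext is returned.
 */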
int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t pt_len;
	size_t remainder;
	uint8_t *ghash;
	uint8_t *blockp;
	uint8_t *cbp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int processed = 0, rv;

	ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);

	pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
	ghash = (uint8_t *)ctx->gcm_ghash;
	blockp = ctx->gcm_pt_buf;
	remainder = pt_len;
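	/*
	 * The last gcm_tag_len bytes of gcm_pt_buf hold the received tag;
	 * only the first pt_len bytes are ciphertext to be decrypted.
	 */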
	while (remainder > 0) {
		/* Incomplete last block */
		if (remainder < block_size) {
			bcopy(blockp, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			/*
			 * not expecting anymore ciphertext, just
			 * compute plaintext for the remaining input
			 */
			gcm_decrypt_incomplete_block(ctx, block_size,
			    processed, encrypt_block, xor_block);
			ctx->gcm_remainder_len = 0;
			goto out;
		}
		/* add ciphertext to the hash */
		GHASH(ctx, blockp, ghash);

		/*
		 * Increment counter.
		 * Counter bits are confined to the bottom 32 bits
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		cbp = (uint8_t *)ctx->gcm_tmp;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

		/* XOR with ciphertext */
		xor_block(cbp, blockp);

		processed += block_size;
		blockp += block_size;
		remainder -= block_size;
	}
out:
	ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	/* compare the input authentication tag with what we calculated */
	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}
static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
	size_t tag_len;

	/*
	 * Check the length of the authentication tag (in bits).
	 */
	tag_len = gcm_param->ulTagBits;
	switch (tag_len) {
	case 32:
	case 64:
	case 96:
	case 104:
	case 112:
	case 120:
	case 128:
		break;
	default:
		return (CRYPTO_MECHANISM_PARAM_INVALID);
	}

	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}
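/*
 * The accepted tag lengths above are the ones SP 800-38D permits: 128, 120,
 * 112, 104, or 96 bits, plus the shorter 64- and 32-bit tags it allows only
 * under additional constraints.
 */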
static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *cb;
	ulong_t remainder = iv_len;
	ulong_t processed = 0;
	uint8_t *datap, *ghash;
	uint64_t len_a_len_c[2];

	ghash = (uint8_t *)ctx->gcm_ghash;
	cb = (uint8_t *)ctx->gcm_cb;
	if (iv_len == 12) {
		bcopy(iv, cb, 12);
		cb[12] = 0;
		cb[13] = 0;
		cb[14] = 0;
		cb[15] = 1;
		/* J0 will be used again in the final */
		copy_block(cb, (uint8_t *)ctx->gcm_J0);
	} else {
		/* GHASH the IV */
		do {
			if (remainder < block_size) {
				bzero(cb, block_size);
				bcopy(&(iv[processed]), cb, remainder);
				datap = (uint8_t *)cb;
				remainder = 0;
			} else {
				datap = (uint8_t *)(&(iv[processed]));
				processed += block_size;
				remainder -= block_size;
			}
			GHASH(ctx, datap, ghash);
		} while (remainder > 0);

		len_a_len_c[0] = 0;
		len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
		GHASH(ctx, len_a_len_c, ctx->gcm_J0);

		/* J0 will be used again in the final */
		copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
	}
}
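/*
 * This is the J0 construction from the GCM spec: with the recommended
 * 96-bit IV, J0 is simply IV || 0x00000001; for any other IV length, J0 is
 * the GHASH of the zero-padded IV followed by a block holding the IV length
 * in bits.
 */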
/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 */
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *ghash, *datap, *authp;
	size_t remainder, processed;

	/* encrypt zero block to get subkey H */
	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
	    (uint8_t *)ctx->gcm_H);

	gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
	    copy_block, xor_block);

	authp = (uint8_t *)ctx->gcm_tmp;
	ghash = (uint8_t *)ctx->gcm_ghash;
	bzero(authp, block_size);
	bzero(ghash, block_size);

	processed = 0;
	remainder = auth_data_len;
	do {
		if (remainder < block_size) {
			/*
			 * There's not a block full of data, pad rest of
			 * buffer with zero
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		/* add auth data to the hash */
		GHASH(ctx, datap, ghash);

	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}
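/*
 * Note the GHASH ordering established here: the zero-padded AAD blocks are
 * hashed first, the encrypt/decrypt paths then append the ciphertext
 * blocks, and the final routines append the length block, giving a hash
 * over A || C || len(A) || len(C).
 */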
int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GCM_PARAMS *gcm_param;

	if (param != NULL) {
		gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;

		if ((rv = gcm_validate_args(gcm_param)) != 0) {
			return (rv);
		}

		gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
		gcm_ctx->gcm_tag_len >>= 3;
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
	    gcm_param->pAAD, gcm_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}
int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GMAC_PARAMS *gmac_param;

	if (param != NULL) {
		gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;

		gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GMAC_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
	    gmac_param->pAAD, gmac_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}
void *
gcm_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GCM_MODE;
	return (gcm_ctx);
}
void *
gmac_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GMAC_MODE;
	return (gcm_ctx);
}
void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}
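/*
 * Illustrative only: a rough sketch of how a caller might drive these
 * routines for one encryption pass.  The AES helper names, key schedule,
 * and parameter setup below are assumptions borrowed from the AES
 * provider, not definitions from this file, and error handling is elided.
 *
 *	gcm_ctx_t *ctx = gcm_alloc_ctx(KM_SLEEP);
 *	ctx->gcm_keysched = aes_keysched;	// assumed AES key schedule
 *	(void) gcm_init_ctx(ctx, (char *)&gcm_params, AES_BLOCK_LEN,
 *	    aes_encrypt_block, aes_copy_block, aes_xor_block);
 *	(void) gcm_mode_encrypt_contiguous_blocks(ctx, (char *)pt, pt_len,
 *	    &out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
 *	    aes_xor_block);
 *	(void) gcm_encrypt_final(ctx, &out, AES_BLOCK_LEN,
 *	    aes_encrypt_block, aes_copy_block, aes_xor_block);
 */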
#ifdef __amd64
/*
 * Return 1 if executing on Intel with PCLMULQDQ instructions,
 * otherwise 0 (i.e., Intel without PCLMULQDQ or AMD64).
 * Cache the result, as the CPU can't change.
 *
 * Note: the userland version uses getisax().  The kernel version uses
 * is_x86_feature().
 */
static int
intel_pclmulqdq_instruction_present(void)
{
	static int cached_result = -1;

	if (cached_result == -1) { /* first time */
#ifdef _KERNEL
		cached_result =
		    is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ);
#else
		uint_t ui = 0;

		(void) getisax(&ui, 1);
		cached_result = (ui & AV_386_PCLMULQDQ) != 0;
#endif	/* _KERNEL */
	}
	return (cached_result);
}
#endif	/* __amd64 */