arch/x86/crypto/aesni-intel_glue.c
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
		const u8 *in, unsigned long plaintext_len, u8 *iv,
		u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
		u8 *auth_tag, unsigned long auth_tag_len);
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
		const u8 *in, unsigned long ciphertext_len, u8 *iv,
		u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
		u8 *auth_tag, unsigned long auth_tag_len);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif
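
/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for a tfm
 * context, so contexts handed to the AES-NI assembly are over-allocated by
 * AESNI_ALIGN - 1 bytes and aes_ctx() below rounds the pointer up to the
 * 16-byte boundary the assembly routines expect.
 */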
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
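
/*
 * Every path that calls into the AES-NI assembly must own the FPU/SSE
 * register state.  When irq_fpu_usable() reports that the FPU cannot be
 * used (for example in an interrupt that preempted other FPU work), the
 * generic C implementation is used as a fallback instead.
 */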
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
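
/*
 * The blkcipher helpers below all follow the same pattern: walk the
 * scatterlists in virtually mapped chunks, hand every complete AES block of
 * a chunk to the assembly routine inside one kernel_fpu_begin()/
 * kernel_fpu_end() section, and let blkcipher_walk_done() carry any partial
 * remainder over to the next iteration.
 */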
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
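
/*
 * CTR mode needs no padding: the final partial block is handled by
 * encrypting the current counter block and XORing only as many keystream
 * bytes as there are input bytes left.
 */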
#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif
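
/*
 * The ablk_*_init() helpers bind the exported asynchronous algorithms to
 * their internal "__driver-*" synchronous counterparts; the ablk_helper
 * layer defers to cryptd whenever a request arrives in a context where the
 * FPU cannot be used directly.
 */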
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}
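
/*
 * For LRW the supplied key is the AES key followed by one extra
 * AES_BLOCK_SIZE tweak key; lrw_aesni_setkey() below splits it accordingly.
 */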
static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}


static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}
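
/*
 * On x86_64 the XTS path goes through the common glue helper: it uses the
 * 8-block aesni_xts_crypt8() routine whenever enough data is queued and
 * falls back to single-block processing for the tail.
 */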
#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif
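
/*
 * RFC4106 AES-GCM: the user-visible "rfc4106(gcm(aes))" algorithm is an
 * asynchronous wrapper around the internal "__driver-gcm-aes-aesni"
 * implementation.  The wrapper forwards requests through cryptd whenever
 * the FPU is not directly usable in the calling context.
 */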
#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}
static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}
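
/*
 * The GHASH hash subkey H is the encryption of the all-zero block under the
 * AES key.  It is derived here by running ctr(aes) with an all-zero IV over
 * an all-zero buffer, which yields exactly E_K(0^128).
 */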
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*key is not aligned: use an auxiliary aligned pointer*/
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}
/* This is the Integrity Check Value (aka the authentication tag) length;
 * it can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}
static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
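
/*
 * The __driver_rfc4106_* helpers run with the FPU already owned by the
 * caller.  When both the data and the AAD fit in single scatterlist entries
 * they are mapped and processed in place; otherwise the data is gathered
 * into a temporary linear buffer first and scattered back afterwards.
 */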
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be equal to
	 * 8 or 12 bytes.
	 */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* Build the IV. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be equal to
	 * 8 or 12 bytes.
	 */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* Build the IV. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		/* assoc must stay inside the cryptlen + assoclen allocation;
		 * req->cryptlen already includes the auth tag here. */
		assoc = (src + req->cryptlen);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}
#endif
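
/*
 * Algorithm registrations: the "__*" entries are the internal synchronous
 * helpers, the remaining entries are the asynchronous algorithms exposed to
 * users of the crypto API.
 */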
static struct crypto_alg aesni_algs[] = { {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
}, {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = __aes_encrypt,
			.cia_decrypt = __aes_decrypt
		}
	}
}, {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name = "__ctr-aes-aesni",
	.cra_driver_name = "__driver-ctr-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = aes_set_key,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
}, {
	.cra_name = "__gcm-aes-aesni",
	.cra_driver_name = "__driver-gcm-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
		       AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.aead = {
			.encrypt = __driver_rfc4106_encrypt,
			.decrypt = __driver_rfc4106_decrypt,
		},
	},
}, {
	.cra_name = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
		       AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_nivaead_type,
	.cra_module = THIS_MODULE,
	.cra_init = rfc4106_init,
	.cra_exit = rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey = rfc4106_set_key,
			.setauthsize = rfc4106_set_authsize,
			.encrypt = rfc4106_encrypt,
			.decrypt = rfc4106_decrypt,
			.geniv = "seqiv",
			.ivsize = 8,
			.maxauthsize = 16,
		},
	},
#endif
#ifdef HAS_PCBC
}, {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
}, {
	.cra_name = "__lrw-aes-aesni",
	.cra_driver_name = "__driver-lrw-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aesni_lrw_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_exit = lrw_aesni_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = lrw_aesni_setkey,
			.encrypt = lrw_encrypt,
			.decrypt = lrw_decrypt,
		},
	},
}, {
	.cra_name = "__xts-aes-aesni",
	.cra_driver_name = "__driver-xts-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aesni_xts_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = xts_aesni_setkey,
			.encrypt = xts_encrypt,
			.decrypt = xts_decrypt,
		},
	},
}, {
	.cra_name = "lrw(aes)",
	.cra_driver_name = "lrw-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
} };
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
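
/*
 * Only register the algorithms when the CPU advertises the AES feature
 * flag; the FPU blkcipher template is initialised first so that the
 * fpu(...)-wrapped algorithms above can be instantiated.
 */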
static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;

	err = crypto_fpu_init();
	if (err)
		return err;

	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}
module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");