/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif

struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);
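
/*
 * All aesni_* routines above execute SSE instructions and therefore must run
 * between kernel_fpu_begin() and kernel_fpu_end().  The glue code below first
 * checks irq_fpu_usable() and falls back to the C/x86 implementations, or
 * defers the work to cryptd, when the FPU state cannot be touched (e.g. when
 * called from interrupt context).
 */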

#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
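
/*
 * The RFC4106 context lives in the tail of the crypto_tfm context and is
 * aligned up to AESNI_ALIGN at run time (cra_ctxsize below reserves an extra
 * AESNI_ALIGN bytes of slack), because the assembly routines require the key
 * schedule and the hash subkey to start on a 16-byte boundary.
 */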
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif
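
/*
 * The plain AES context is likewise over-allocated by AESNI_ALIGN - 1 bytes
 * (see cra_ctxsize below).  If the crypto API already guarantees at least
 * AESNI_ALIGN alignment for the tfm context, no extra realignment is needed
 * and align is forced to 1.
 */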
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static struct crypto_alg aesni_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
};
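
/*
 * __aes_encrypt/__aes_decrypt and the "__aes-aesni" algorithm below skip the
 * irq_fpu_usable() check.  They are internal building blocks (priority 0)
 * meant to sit underneath the fpu() template (see the lrw/pcbc/xts init
 * functions further down), which takes care of kernel_fpu_begin()/end()
 * before the cipher runs.
 */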

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static struct crypto_alg __aesni_alg = {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
};
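
/*
 * The ecb/cbc/ctr helpers below run the whole scatterlist walk inside a
 * single kernel_fpu_begin()/kernel_fpu_end() section, so they clear
 * CRYPTO_TFM_REQ_MAY_SLEEP first: blkcipher_walk_done() must not sleep while
 * the FPU is held.
 */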

static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ecb_alg = {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
};

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_cbc_alg = {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
};
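
/*
 * CTR is a stream mode: full blocks are handled by aesni_ctr_enc() and any
 * trailing partial block is finished in ctr_crypt_final() by encrypting the
 * current counter block and XOR-ing the keystream into the remaining bytes,
 * which is why blk_ctr_alg advertises a block size of 1.
 */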

#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ctr_alg = {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
};
#endif
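
/*
 * The ablk_* functions below are the asynchronous (ABLKCIPHER | ASYNC) front
 * ends that users of the crypto API actually get.  When the FPU is usable the
 * request is handled synchronously by the inner "__driver-*" blkcipher;
 * otherwise it is copied into the request context and deferred to the cryptd
 * workqueue.
 */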

static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static void ablk_init_common(struct crypto_tfm *tfm,
			     struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}
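
/*
 * Each ablk_*_init() below allocates a cryptd wrapper around the matching
 * internal "__driver-*" algorithm.  The request size registered in
 * ablk_init_common() leaves room for the deferred cryptd request that
 * ablk_encrypt()/ablk_decrypt() build in the request context.
 */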

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ctr_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
};

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(
		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_rfc3686_ctr_alg = {
	.cra_name		= "rfc3686(ctr(aes))",
	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
	.cra_init		= ablk_rfc3686_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.ivsize	     = CTR_RFC3686_IV_SIZE,
			.setkey	     = ablk_set_key,
			.encrypt     = ablk_encrypt,
			.decrypt     = ablk_decrypt,
			.geniv	     = "seqiv",
		},
	},
};
#endif
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_lrw_alg = {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
	.cra_init		= ablk_lrw_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_pcbc_alg = {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_xts_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
	.cra_init		= ablk_xts_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}
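
/*
 * GCM's hash subkey is H = E_K(0^128).  rfc4106_set_hash_subkey() obtains it
 * by running "ctr(aes)" over a zeroed 16-byte buffer with an all-zero IV: the
 * keystream for the first counter block is E_K(0^128), and XOR-ing it into a
 * zero block leaves exactly H in hash_subkey.
 */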
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
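
/*
 * An RFC4106 "key" is the AES key with a 4 byte salt appended.  setkey strips
 * the salt into ctx->nonce and only accepts the remaining 16 bytes (AES-128),
 * the only key size this GCM implementation supports.
 */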
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
				aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*
		 * The key is not aligned: use an auxiliary aligned buffer.
		 * Keep the original kmalloc() pointer in new_key_mem so it
		 * can be passed to kfree() below.
		 */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
		key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length; it
 * can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static struct crypto_alg rfc4106_alg = {
	.cra_name = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_nivaead_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
	.cra_init = rfc4106_init,
	.cra_exit = rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey = rfc4106_set_key,
			.setauthsize = rfc4106_set_authsize,
			.encrypt = rfc4106_encrypt,
			.decrypt = rfc4106_decrypt,
			.geniv = "seqiv",
			.ivsize = 8,
			.maxauthsize = 16,
		},
	},
};
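
/*
 * __driver_rfc4106_encrypt/__driver_rfc4106_decrypt run with the FPU already
 * claimed by the caller.  They require an AAD of 8 or 12 bytes (RFC4106 with
 * or without 64-bit extended sequence numbers), build the 16-byte pre-counter
 * block as salt || explicit IV || 0x00000001, and either map single-entry
 * scatterlists directly or bounce the data through a temporary linear buffer.
 */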

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* Build the IV (pre-counter block) below. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}
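
/*
 * The decrypt path computes the authentication tag into a stack buffer
 * (authTag, placed right after the aligned IV) and compares it against the
 * trailing auth_tag_len bytes of the ciphertext, returning -EBADMSG on
 * mismatch.
 */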
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* Build the IV (pre-counter block) below. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		/*
		 * The associated data is copied directly after the
		 * ciphertext; the buffer holds cryptlen + assoclen bytes.
		 */
		assoc = (src + req->cryptlen);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}

static struct crypto_alg __rfc4106_alg = {
	.cra_name = "__gcm-aes-aesni",
	.cra_driver_name = "__driver-gcm-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
	.cra_u = {
		.aead = {
			.encrypt = __driver_rfc4106_encrypt,
			.decrypt = __driver_rfc4106_decrypt,
		},
	},
};
#endif
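
/*
 * Algorithms are registered in dependency order and unwound in reverse on
 * failure: each error label below falls through the unregister calls for
 * everything that was registered before the step that failed.
 */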
static int __init aesni_init(void)
{
	int err;

	if (!cpu_has_aes) {
		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
		return -ENODEV;
	}

	if ((err = crypto_fpu_init()))
		goto fpu_err;
	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&__aesni_alg)))
		goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
		goto blk_cbc_err;
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef CONFIG_X86_64
	if ((err = crypto_register_alg(&blk_ctr_alg)))
		goto blk_ctr_err;
	if ((err = crypto_register_alg(&ablk_ctr_alg)))
		goto ablk_ctr_err;
	if ((err = crypto_register_alg(&__rfc4106_alg)))
		goto __aead_gcm_err;
	if ((err = crypto_register_alg(&rfc4106_alg)))
		goto aead_gcm_err;
#ifdef HAS_CTR
	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
		goto ablk_rfc3686_ctr_err;
#endif
#endif
#ifdef HAS_LRW
	if ((err = crypto_register_alg(&ablk_lrw_alg)))
		goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
		goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
	if ((err = crypto_register_alg(&ablk_xts_alg)))
		goto ablk_xts_err;
#endif
	return err;

#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
#endif
	crypto_unregister_alg(&rfc4106_alg);
aead_gcm_err:
	crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
	crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
	crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
fpu_err:
	return err;
}

static void __exit aesni_exit(void)
{
#ifdef HAS_XTS
	crypto_unregister_alg(&ablk_xts_alg);
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
	crypto_unregister_alg(&rfc4106_alg);
	crypto_unregister_alg(&__rfc4106_alg);
	crypto_unregister_alg(&ablk_ctr_alg);
	crypto_unregister_alg(&blk_ctr_alg);
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");
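
/*
 * Usage sketch (illustrative only, not part of the driver): another kernel
 * module can reach the high-priority "cbc(aes)" ablkcipher registered above
 * through the generic crypto API, following the same allocate/setkey/request
 * pattern used by rfc4106_set_hash_subkey() in this file.  The function name
 * and the zero key/IV are assumptions made purely for the example; a real
 * caller must also complete asynchronously queued requests (-EINPROGRESS or
 * -EBUSY) via a callback, as rfc4106_set_hash_subkey() does.
 *
 *	static int example_cbc_aes_encrypt_one(u8 *buf, unsigned int len)
 *	{
 *		u8 key[16] = { 0 }, iv[16] = { 0 };
 *		struct crypto_ablkcipher *tfm;
 *		struct ablkcipher_request *req;
 *		struct scatterlist sg;
 *		int err;
 *
 *		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *		err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
 *		if (err)
 *			goto out_free_tfm;
 *		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			err = -ENOMEM;
 *			goto out_free_tfm;
 *		}
 *		sg_init_one(&sg, buf, len);	 len must be a multiple of 16
 *		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *		err = crypto_ablkcipher_encrypt(req);
 *		ablkcipher_request_free(req);
 *	out_free_tfm:
 *		crypto_free_ablkcipher(tfm);
 *		return err;
 *	}
 */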