/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};
struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};
struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};
#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16
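
/*
 * Editor's illustrative sketch (not part of the original driver): how the
 * bulk loops below use AES_BLOCK_MASK.  Each blkcipher walk segment is split
 * into the whole-block portion handed to the asm routines and a remainder
 * that is carried to the next walk step (or, for CTR, to ctr_crypt_final()).
 * The helper name is made up for illustration only.
 */
static inline unsigned int aesni_full_blocks_example(unsigned int nbytes)
{
	/* e.g. nbytes == 70: 70 & ~15 == 64 processed now, 70 & 15 == 6 left */
	return nbytes & AES_BLOCK_MASK;
}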
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
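
/*
 * Editor's illustrative sketch (not part of the original driver): how the
 * glue code further below assembles the pre-counter block j0 described in
 * the comment above -- the 4 byte salt, then the 8 byte per-packet IV from
 * the ESP payload, then the big-endian constant 0x00000001.  The helper name
 * and signature are made up for illustration only.
 */
static inline void rfc4106_build_j0_example(u8 *j0, const u8 *salt,
					    const u8 *esp_iv)
{
	memcpy(j0, salt, 4);			/* salt from the Security Association */
	memcpy(j0 + 4, esp_iv, 8);		/* explicit IV from the ESP payload */
	*(__be32 *)(j0 + 12) = cpu_to_be32(1);	/* initial counter value */
}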
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
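
/*
 * Editor's illustrative sketch (not part of the original driver): the
 * .cra_ctxsize values below reserve sizeof(...) + AESNI_ALIGN - 1 bytes,
 * which is exactly enough for aes_ctx() above to round the raw context
 * pointer up to a 16 byte boundary without overrunning the allocation.
 * The helper name is made up for illustration only.
 */
static inline unsigned long aesni_ctx_padding_used_example(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;

	/* rounding up consumes at most AESNI_ALIGN - 1 of the padding bytes */
	return ALIGN(addr, AESNI_ALIGN) - addr;
}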
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);

		err = aesni_set_key(ctx, in_key, key_len);
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);

		aesni_enc(ctx, dst, src);
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);

		aesni_dec(ctx, dst, src);
static struct crypto_alg aesni_alg = {
	.cra_driver_name	= "aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
	.cia_min_keysize	= AES_MIN_KEY_SIZE,
	.cia_max_keysize	= AES_MAX_KEY_SIZE,
	.cia_setkey		= aes_set_key,
	.cia_encrypt		= aes_encrypt,
	.cia_decrypt		= aes_decrypt
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
static struct crypto_alg __aesni_alg = {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list),
	.cia_min_keysize	= AES_MIN_KEY_SIZE,
	.cia_max_keysize	= AES_MAX_KEY_SIZE,
	.cia_setkey		= aes_set_key,
	.cia_encrypt		= __aes_encrypt,
	.cia_decrypt		= __aes_decrypt
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
static struct crypto_alg blk_ecb_alg = {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= aes_set_key,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
static struct crypto_alg blk_cbc_alg = {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= aes_set_key,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);

		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
static struct crypto_alg blk_ctr_alg = {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aes_set_key,
	.encrypt		= ctr_crypt,
	.decrypt		= ctr_crypt,
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);

		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);

		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
static void ablk_init_common(struct crypto_tfm *tfm,
			     struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ablk_set_key,
	.encrypt		= ablk_encrypt,
	.decrypt		= ablk_decrypt,
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ablk_set_key,
	.encrypt		= ablk_encrypt,
	.decrypt		= ablk_decrypt,
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_ctr_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ablk_set_key,
	.encrypt		= ablk_encrypt,
	.decrypt		= ablk_encrypt,
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(
		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_rfc3686_ctr_alg = {
	.cra_name		= "rfc3686(ctr(aes))",
	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
	.cra_init		= ablk_rfc3686_ctr_init,
	.cra_exit		= ablk_exit,
	.min_keysize		= AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
	.ivsize			= CTR_RFC3686_IV_SIZE,
	.setkey			= ablk_set_key,
	.encrypt		= ablk_encrypt,
	.decrypt		= ablk_decrypt,
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_lrw_alg = {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
	.cra_init		= ablk_lrw_init,
	.cra_exit		= ablk_exit,
	.min_keysize		= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ablk_set_key,
	.encrypt		= ablk_encrypt,
	.decrypt		= ablk_decrypt,
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_pcbc_alg = {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ablk_set_key,
	.encrypt		= ablk_encrypt,
	.decrypt		= ablk_decrypt,
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_xts_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
	.cra_init		= ablk_xts_init,
	.cra_exit		= ablk_exit,
	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ablk_set_key,
	.encrypt		= ablk_encrypt,
	.decrypt		= ablk_decrypt,
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}
static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
}
static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
				aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*key is not aligned: use an auxiliary aligned pointer*/
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_mem, key, key_len);
		key = new_key_mem;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}
/* This is the Integrity Check Value (aka the authentication tag) length and
 * can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}
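
/*
 * Editor's illustrative sketch (not part of the original driver): the
 * authsize validation is not shown in this excerpt; per the comment above,
 * only 8, 12 or 16 byte tags are valid for RFC4106 GCM.  The helper name is
 * made up for illustration only.
 */
static inline bool rfc4106_authsize_valid_example(unsigned int authsize)
{
	return authsize == 8 || authsize == 12 || authsize == 16;
}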
static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);

		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

		ret = cryptd_child->base.crt_aead.encrypt(req);
static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);

		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

		ret = cryptd_child->base.crt_aead.decrypt(req);
static struct crypto_alg rfc4106_alg = {
	.cra_name		= "rfc4106(gcm(aes))",
	.cra_driver_name	= "rfc4106-gcm-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_type		= &crypto_nivaead_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(rfc4106_alg.cra_list),
	.cra_init		= rfc4106_init,
	.cra_exit		= rfc4106_exit,
	.setkey			= rfc4106_set_key,
	.setauthsize		= rfc4106_set_authsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence numbers,
	 * we need to have the AAD length equal to 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);

		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);

		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended sequence numbers,
	 * we need to have the AAD length equal to 8 or 12 bytes. */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);

		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);

		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
static struct crypto_alg __rfc4106_alg = {
	.cra_name		= "__gcm-aes-aesni",
	.cra_driver_name	= "__driver-gcm-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_type		= &crypto_aead_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
	.encrypt		= __driver_rfc4106_encrypt,
	.decrypt		= __driver_rfc4106_decrypt,
static int __init aesni_init(void)
{
		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");

	if ((err = crypto_fpu_init()))
	if ((err = crypto_register_alg(&aesni_alg)))
	if ((err = crypto_register_alg(&__aesni_alg)))
	if ((err = crypto_register_alg(&blk_ecb_alg)))
	if ((err = crypto_register_alg(&blk_cbc_alg)))
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
#ifdef CONFIG_X86_64
	if ((err = crypto_register_alg(&blk_ctr_alg)))
	if ((err = crypto_register_alg(&ablk_ctr_alg)))
	if ((err = crypto_register_alg(&__rfc4106_alg)))
		goto __aead_gcm_err;
	if ((err = crypto_register_alg(&rfc4106_alg)))
	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
		goto ablk_rfc3686_ctr_err;

	if ((err = crypto_register_alg(&ablk_lrw_alg)))
	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
	if ((err = crypto_register_alg(&ablk_xts_alg)))

	crypto_unregister_alg(&ablk_pcbc_alg);
	crypto_unregister_alg(&ablk_lrw_alg);
#ifdef CONFIG_X86_64
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
	crypto_unregister_alg(&rfc4106_alg);
	crypto_unregister_alg(&__rfc4106_alg);
	crypto_unregister_alg(&ablk_ctr_alg);
	crypto_unregister_alg(&blk_ctr_alg);
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);
static void __exit aesni_exit(void)
{
	crypto_unregister_alg(&ablk_xts_alg);
	crypto_unregister_alg(&ablk_pcbc_alg);
	crypto_unregister_alg(&ablk_lrw_alg);
#ifdef CONFIG_X86_64
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
	crypto_unregister_alg(&rfc4106_alg);
	crypto_unregister_alg(&__rfc4106_alg);
	crypto_unregister_alg(&ablk_ctr_alg);
	crypto_unregister_alg(&blk_ctr_alg);
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);
module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");