/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};
#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
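
/*
 * Illustrative sketch (not part of the original driver): under RFC4106 the
 * 16-byte pre-counter block j0 passed as @iv to the two routines above
 * would be assembled roughly like this, assuming "salt" is the 4-byte nonce
 * stored with the key and "esp_iv" is the 8-byte IV from the ESP payload:
 *
 *	u8 j0[16];
 *	memcpy(j0, salt, 4);
 *	memcpy(j0 + 4, esp_iv, 8);
 *	*(__be32 *)(j0 + 12) = cpu_to_be32(1);
 *
 * __driver_rfc4106_encrypt() and __driver_rfc4106_decrypt() below build the
 * same block with a byte-by-byte loop over ctx->nonce and req->iv.
 */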
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
			crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
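
/*
 * Worked example (illustrative only): cra_ctxsize for the aesni algorithms
 * below is sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1, so there is
 * always enough slack to round the raw context pointer up.  If the crypto
 * API handed out a context at address 0x1008, ALIGN(0x1008, 16) yields
 * 0x1010, the 16-byte aligned address the AES-NI assembler routines expect.
 */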
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}
static struct crypto_alg aesni_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
};
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
static struct crypto_alg __aesni_alg = {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
};
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static struct crypto_alg blk_ecb_alg = {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
};
static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static struct crypto_alg blk_cbc_alg = {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
};
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
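
/*
 * Worked example (illustrative only): for a 20-byte CTR request, ctr_crypt()
 * below passes the first 16 bytes to aesni_ctr_enc() and then calls
 * ctr_crypt_final() for the remaining 4 bytes; the last counter block is
 * encrypted with aesni_enc(), the first 4 keystream bytes are XORed into the
 * output, and the counter in walk->iv is incremented once more.
 */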
static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
static struct crypto_alg blk_ctr_alg = {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
};
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}
static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
static void ablk_init_common(struct crypto_tfm *tfm,
			     struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_ctr_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
		},
	},
};
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(
		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_rfc3686_ctr_alg = {
	.cra_name		= "rfc3686(ctr(aes))",
	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
	.cra_init		= ablk_rfc3686_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.ivsize	     = CTR_RFC3686_IV_SIZE,
			.setkey	     = ablk_set_key,
			.encrypt     = ablk_encrypt,
			.decrypt     = ablk_decrypt,
		},
	},
};
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_lrw_alg = {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
	.cra_init		= ablk_lrw_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_pcbc_alg = {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
static struct crypto_alg ablk_xts_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
	.cra_init		= ablk_xts_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}
static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
}
static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
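
/*
 * Explanatory note (not from the original sources): GCM defines the GHASH
 * subkey H as the AES encryption of the all-zero block under the session
 * key.  Running the "ctr(aes)" transform above over a zeroed 16-byte buffer
 * with a zeroed counter block produces exactly that value, i.e.
 * hash_subkey = AES-ENC(key, 0^128).
 */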
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
				aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*key is not aligned: use an auxiliary aligned pointer*/
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_mem, key, key_len);
		key = new_key_mem;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}
/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}
static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child =
			cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child =
			cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
static struct crypto_alg rfc4106_alg = {
	.cra_name		= "rfc4106(gcm(aes))",
	.cra_driver_name	= "rfc4106-gcm-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_type		= &crypto_nivaead_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(rfc4106_alg.cra_list),
	.cra_init		= rfc4106_init,
	.cra_exit		= rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey		= rfc4106_set_key,
			.setauthsize	= rfc4106_set_authsize,
			.encrypt	= rfc4106_encrypt,
			.decrypt	= rfc4106_decrypt,
		},
	},
};
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 8 or 12 bytes */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 8 or 12 bytes */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}
static struct crypto_alg __rfc4106_alg = {
	.cra_name		= "__gcm-aes-aesni",
	.cra_driver_name	= "__driver-gcm-aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_type		= &crypto_aead_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
	.cra_u = {
		.aead = {
			.encrypt	= __driver_rfc4106_encrypt,
			.decrypt	= __driver_rfc4106_decrypt,
		},
	},
};
static int __init aesni_init(void)
{
	int err;

	if (!cpu_has_aes) {
		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
		return -ENODEV;
	}

	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&__aesni_alg)))
		goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
		goto blk_cbc_err;
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef CONFIG_X86_64
	if ((err = crypto_register_alg(&blk_ctr_alg)))
		goto blk_ctr_err;
	if ((err = crypto_register_alg(&ablk_ctr_alg)))
		goto ablk_ctr_err;
	if ((err = crypto_register_alg(&__rfc4106_alg)))
		goto __aead_gcm_err;
	if ((err = crypto_register_alg(&rfc4106_alg)))
		goto aead_gcm_err;
	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
		goto ablk_rfc3686_ctr_err;
#endif
	if ((err = crypto_register_alg(&ablk_lrw_alg)))
		goto ablk_lrw_err;
	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
		goto ablk_pcbc_err;
	if ((err = crypto_register_alg(&ablk_xts_alg)))
		goto ablk_xts_err;

	return 0;

ablk_xts_err:
	crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
	crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#ifdef CONFIG_X86_64
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
	crypto_unregister_alg(&rfc4106_alg);
aead_gcm_err:
	crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
	crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
	crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
	return err;
}
static void __exit aesni_exit(void)
{
	crypto_unregister_alg(&ablk_xts_alg);
	crypto_unregister_alg(&ablk_pcbc_alg);
	crypto_unregister_alg(&ablk_lrw_alg);
#ifdef CONFIG_X86_64
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
	crypto_unregister_alg(&rfc4106_alg);
	crypto_unregister_alg(&__rfc4106_alg);
	crypto_unregister_alg(&ablk_ctr_alg);
	crypto_unregister_alg(&blk_ctr_alg);
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);
}
module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");