/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <asm/crypto/glue_helper.h>
#define AESNI_ALIGN	16
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct crypto_aes_ctx aes_key_expanded
		__attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 nonce[4];
};
struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
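
/*
 * Message-length thresholds (in bytes) below which the AVX and AVX2 GCM
 * code paths are not used; see the aesni_gcm_*_avx*() dispatch helpers
 * further down.
 */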
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
			      aad, aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
			      aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
#endif
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
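
/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for a tfm
 * context, so the AES contexts above are over-allocated by AESNI_ALIGN - 1
 * bytes and aligned by hand in the helpers below.
 */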
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
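
/*
 * Key expansion uses the AES-NI instruction only when the FPU is usable in
 * the current context; otherwise the generic C key expansion is used.
 */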
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
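
/*
 * The blkcipher walks below run with the FPU section held, so the requests
 * may not sleep; CRYPTO_TFM_REQ_MAY_SLEEP is cleared before walking.
 */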
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
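
/*
 * Handle the final partial block of a CTR request: encrypt the counter
 * block and XOR only the remaining nbytes of keystream into the output,
 * then bump the counter.
 */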
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif
static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}
static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}
#ifdef CONFIG_X86_64
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
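
/*
 * Glue tables for the XTS fast path: aesni_xts_enc8()/aesni_xts_dec8()
 * process eight blocks per call while enough data remains, with the
 * single-block helpers as the fallback for the tail.
 */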
static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif
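
/*
 * The exported rfc4106(gcm(aes)) transform only stores a pointer to a
 * cryptd AEAD wrapping the internal __driver-gcm-aes-aesni implementation;
 * the cryptd instance is created here and released in rfc4106_exit().
 */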
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}
static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* This is the Integrity Check Value (aka the authentication tag) length;
 * it can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
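
/*
 * GCM helpers, called with the FPU section held.  The 16-byte counter block
 * is built as 4-byte nonce (salt stored with the key) || 8-byte explicit IV
 * from the request || 0x00000001.  When src and dst each fit in a single
 * scatterlist entry within one page, they are mapped directly; otherwise the
 * data is bounced through a temporary kmalloc'd buffer.
 */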
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
				GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
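
/*
 * Async entry points: if the FPU is usable in the caller's context the
 * request goes straight to the internal cryptd child, otherwise it is
 * queued through cryptd so the SIMD work runs in process context.
 */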
static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead_request_set_tfm(req, irq_fpu_usable() ?
				  cryptd_aead_child(cryptd_tfm) :
				  &cryptd_tfm->base);

	return crypto_aead_encrypt(req);
}
static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead_request_set_tfm(req, irq_fpu_usable() ?
				  cryptd_aead_child(cryptd_tfm) :
				  &cryptd_tfm->base);

	return crypto_aead_decrypt(req);
}
static struct crypto_alg aesni_algs[] = { {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
}, {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = __aes_encrypt,
			.cia_decrypt = __aes_decrypt
		}
	}
}, {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name = "__ctr-aes-aesni",
	.cra_driver_name = "__driver-ctr-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = aes_set_key,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
		},
	},
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
}, {
	.cra_name = "__lrw-aes-aesni",
	.cra_driver_name = "__driver-lrw-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aesni_lrw_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_exit = lrw_aesni_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = lrw_aesni_setkey,
			.encrypt = lrw_encrypt,
			.decrypt = lrw_decrypt,
		},
	},
}, {
	.cra_name = "__xts-aes-aesni",
	.cra_driver_name = "__driver-xts-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aesni_xts_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = xts_aesni_setkey,
			.encrypt = xts_encrypt,
			.decrypt = xts_decrypt,
		},
	},
}, {
	.cra_name = "lrw(aes)",
	.cra_driver_name = "lrw-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
} };
#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
	.setkey = common_rfc4106_set_key,
	.setauthsize = common_rfc4106_set_authsize,
	.encrypt = helper_rfc4106_encrypt,
	.decrypt = helper_rfc4106_decrypt,
	.ivsize = 8,
	.maxauthsize = 16,
	.base = {
		.cra_name = "__gcm-aes-aesni",
		.cra_driver_name = "__driver-gcm-aes-aesni",
		.cra_flags = CRYPTO_ALG_INTERNAL,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask = AESNI_ALIGN - 1,
		.cra_module = THIS_MODULE,
	},
}, {
	.init = rfc4106_init,
	.exit = rfc4106_exit,
	.setkey = rfc4106_set_key,
	.setauthsize = rfc4106_set_authsize,
	.encrypt = rfc4106_encrypt,
	.decrypt = rfc4106_decrypt,
	.ivsize = 8,
	.maxauthsize = 16,
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aesni",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct cryptd_aead *),
		.cra_module = THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_algs;

	return err;

unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}
static void __exit aesni_exit(void)
{
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}
late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");