/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif
#define AESNI_ALIGN	16
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
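
/*
 * Worked example (with AES_BLOCK_SIZE == 16): for a 100-byte request,
 * "nbytes & AES_BLOCK_MASK" is 96, i.e. the largest whole number of blocks,
 * while "nbytes & (AES_BLOCK_SIZE - 1)" is the 4-byte tail that is left for
 * the next walk iteration or for the final partial-block handling.
 */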
/* This data is stored at the end of the crypto_tfm struct.
 * It's a per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct crypto_aes_ctx aes_key_expanded
		__attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 nonce[4];
};
struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
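
/*
 * The GCM dispatch helpers below use these as size thresholds: requests
 * shorter than AVX_GEN2_OPTSIZE (or with a non-128-bit key) take the SSE
 * aesni_gcm_enc/dec path, and only requests of at least AVX_GEN4_OPTSIZE use
 * the AVX2 ("gen4") routines.
 */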
#ifdef CONFIG_X86_64
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
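
/*
 * Minimal illustrative sketch of the pre-counter block layout described
 * above (the helper name is ours and it is not used by this driver, which
 * builds the block inline in helper_rfc4106_encrypt()/_decrypt() below):
 * 4-byte salt, then the 8-byte ESP IV, then the big-endian constant 1.
 */
static inline void rfc4106_build_j0_sketch(u8 j0[16], const u8 salt[4],
					   const u8 esp_iv[8])
{
	memcpy(j0, salt, 4);			/* 4 byte salt          */
	memcpy(j0 + 4, esp_iv, 8);		/* 8 byte per-packet IV */
	*(__be32 *)(j0 + 12) = cpu_to_be32(1);	/* trailing 0x00000001  */
}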
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
#endif
#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
			      aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
#endif
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static inline struct aesni_rfc4106_gcm_ctx *
aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
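
/*
 * For example, ALIGN(addr, 16) rounds an address up to the next multiple of
 * 16 (0x1004 becomes 0x1010), so the context returned by the helpers above
 * always meets the 16-byte alignment the AES-NI key schedule needs, even
 * when the crypto API only guarantees a smaller context alignment.
 */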
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * Based on key length, override with the by8 version of ctr mode
	 * encryption/decryption for improved performance.
	 * aes_set_key_common() ensures that the key length is one of
	 * {128, 192, 256}.
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif
static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}
static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}
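
/*
 * For example, a 32-byte xts(aes) key handled above is split into bytes
 * 0..15 (data/crypt key) and bytes 16..31 (tweak key); a 64-byte key splits
 * into two AES-256 keys, which is why the xts algorithms below advertise
 * 2 * AES_MIN_KEY_SIZE .. 2 * AES_MAX_KEY_SIZE.
 */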
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}
#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif
#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(
		aead,
		sizeof(struct aead_request) +
		crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.
	 * We want to cipher all zeros to create the hash sub key.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
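
/*
 * Note: the routine above simply encrypts a 16-byte all-zero block with the
 * supplied key, i.e. it computes the GCM hash subkey H = AES-ENC(K, 0^128);
 * running ctr(aes) over a zero block with an all-zero counter block yields
 * exactly that value.
 */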
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
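
/*
 * For example, a 20-byte rfc4106(gcm(aes)) key passed to the setkey above is
 * split into a 16-byte AES-128 key and the trailing 4-byte nonce (salt);
 * 28- and 36-byte keys carry AES-192 and AES-256 keys in the same layout.
 */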
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* This is the Integrity Check Value (aka the authentication tag) length;
 * it can be 8, 12 or 16 bytes long.
 */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes.
	 */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;

	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
				GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
			  ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
			  dst + ((unsigned long)req->cryptlen), auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes.
	 */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		 -EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, irq_fpu_usable() ?
				     cryptd_aead_child(cryptd_tfm) :
				     &cryptd_tfm->base);

	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return crypto_aead_encrypt(subreq);
}

static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, irq_fpu_usable() ?
				     cryptd_aead_child(cryptd_tfm) :
				     &cryptd_tfm->base);

	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return crypto_aead_decrypt(subreq);
}
#endif
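
/*
 * Minimal usage sketch (not part of this driver): how another kernel module
 * could drive the "rfc4106(gcm(aes))" AEAD registered below through the
 * generic crypto API.  The function name, buffer sizes (8-byte AAD, 64-byte
 * payload) and key contents are illustrative assumptions; error paths and
 * asynchronous completion handling (-EINPROGRESS/-EBUSY) are omitted.
 * Buffer layout follows the AEAD convention used by this file:
 * source = AAD || plaintext, destination = AAD || ciphertext || tag.
 */
static int __maybe_unused rfc4106_usage_sketch(void)
{
	static const u8 key[20] = { 0 };	/* 16-byte AES key + 4-byte salt */
	u8 iv[8] = { 0 };			/* 8-byte per-packet IV          */
	u8 buf[8 + 64 + 16] = { 0 };		/* AAD || plaintext || tag room  */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_aead(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	aead_request_set_ad(req, 8);			/* 8 bytes of AAD      */
	aead_request_set_crypt(req, &sg, &sg, 64, iv);	/* 64-byte plaintext   */
	err = crypto_aead_encrypt(req);			/* tag lands at buf+72 */

	aead_request_free(req);
	crypto_free_aead(tfm);
	return err;
}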
static struct crypto_alg aesni_algs[] = { {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_module = THIS_MODULE,
	.cra_u = { .cipher = {
		.cia_min_keysize = AES_MIN_KEY_SIZE,
		.cia_max_keysize = AES_MAX_KEY_SIZE,
		.cia_setkey = aes_set_key,
		.cia_encrypt = aes_encrypt,
		.cia_decrypt = aes_decrypt
	} }
}, {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_module = THIS_MODULE,
	.cra_u = { .cipher = {
		.cia_min_keysize = AES_MIN_KEY_SIZE,
		.cia_max_keysize = AES_MAX_KEY_SIZE,
		.cia_setkey = aes_set_key,
		.cia_encrypt = __aes_encrypt,
		.cia_decrypt = __aes_decrypt
	} }
}, {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = { .blkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = aes_set_key,
		.encrypt = ecb_encrypt,
		.decrypt = ecb_decrypt,
	} }
}, {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = { .blkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = aes_set_key,
		.encrypt = cbc_encrypt,
		.decrypt = cbc_decrypt,
	} }
}, {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = { .ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = ablk_set_key,
		.encrypt = ablk_encrypt,
		.decrypt = ablk_decrypt,
	} }
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = { .ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = ablk_set_key,
		.encrypt = ablk_encrypt,
		.decrypt = ablk_decrypt,
	} }
#ifdef CONFIG_X86_64
}, {
	.cra_name = "__ctr-aes-aesni",
	.cra_driver_name = "__driver-ctr-aes-aesni",
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = { .blkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = aes_set_key,
		.encrypt = ctr_crypt,
		.decrypt = ctr_crypt,
	} }
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = { .ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = ablk_set_key,
		.encrypt = ablk_encrypt,
		.decrypt = ablk_encrypt,
	} }
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = { .ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = ablk_set_key,
		.encrypt = ablk_encrypt,
		.decrypt = ablk_decrypt,
	} }
#endif
}, {
	.cra_name = "__lrw-aes-aesni",
	.cra_driver_name = "__driver-lrw-aes-aesni",
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aesni_lrw_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_exit = lrw_aesni_exit_tfm,
	.cra_u = { .blkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = lrw_aesni_setkey,
		.encrypt = lrw_encrypt,
		.decrypt = lrw_decrypt,
	} }
}, {
	.cra_name = "__xts-aes-aesni",
	.cra_driver_name = "__driver-xts-aes-aesni",
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_INTERNAL,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aesni_xts_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = { .blkcipher = {
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = xts_aesni_setkey,
		.encrypt = xts_encrypt,
		.decrypt = xts_decrypt,
	} }
}, {
	.cra_name = "lrw(aes)",
	.cra_driver_name = "lrw-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = { .ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = ablk_set_key,
		.encrypt = ablk_encrypt,
		.decrypt = ablk_decrypt,
	} }
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = { .ablkcipher = {
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = ablk_set_key,
		.encrypt = ablk_encrypt,
		.decrypt = ablk_decrypt,
	} }
} };
#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
	.setkey = common_rfc4106_set_key,
	.setauthsize = common_rfc4106_set_authsize,
	.encrypt = helper_rfc4106_encrypt,
	.decrypt = helper_rfc4106_decrypt,
	.ivsize = 8,
	.maxauthsize = 16,
	.base = {
		.cra_name = "__gcm-aes-aesni",
		.cra_driver_name = "__driver-gcm-aes-aesni",
		.cra_flags = CRYPTO_ALG_INTERNAL,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask = AESNI_ALIGN - 1,
		.cra_module = THIS_MODULE,
	},
}, {
	.init = rfc4106_init,
	.exit = rfc4106_exit,
	.setkey = rfc4106_set_key,
	.setauthsize = rfc4106_set_authsize,
	.encrypt = rfc4106_encrypt,
	.decrypt = rfc4106_decrypt,
	.ivsize = 8,
	.maxauthsize = 16,
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aesni",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct cryptd_aead *),
		.cra_module = THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_algs;

	return 0;

unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}
static void __exit aesni_exit(void)
{
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");