/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif
#define AESNI_ALIGN	16
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        struct crypto_aes_ctx aes_key_expanded
                __attribute__ ((__aligned__(AESNI_ALIGN)));
        u8 nonce[4];
};
struct aesni_lrw_ctx {
        struct lrw_table_ctx lrw_table;
        u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
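/*
 * These two byte-length thresholds drive the GCM dispatch wrappers
 * below: requests shorter than AVX_GEN2_OPTSIZE go to the SSE
 * routines, and (with AVX2) only requests of AVX_GEN4_OPTSIZE or more
 * use the gen4 code path. The exact cut-over points are presumably
 * tuning results; they are not derived from any architectural
 * constant.
 */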
#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
                                 const u8 *in, bool enc, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
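/*
 * For reference, the 16-byte pre-counter block j0 described above is
 * laid out as follows (a sketch based on the parameter description;
 * the rfc4106 helpers later in this file build it exactly this way):
 *
 *      bytes  0..3   salt from the Security Association (ctx->nonce)
 *      bytes  4..11  explicit IV from the IPSec ESP payload (req->iv)
 *      bytes 12..15  the big-endian constant 0x00000001
 */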
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey,
                              aad, aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        }
}
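/*
 * Note: this wrapper and the ones below fall back to the SSE routines
 * for short inputs and for non-128-bit keys. The short-input cut-over
 * is presumably because the AVX path must first run the precompute
 * step, which only pays off once the buffer is large enough; the
 * 128-bit-only restriction matches what the AVX assembly supports.
 */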
static void aesni_gcm_dec_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                              aad, aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        }
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey,
                              aad, aad_len, auth_tag, auth_tag_len);
        } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        }
}
static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                              aad, aad_len, auth_tag, auth_tag_len);
        } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        }
}
#endif
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
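/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for tfm
 * context memory, which may be smaller than the 16 bytes the AES-NI
 * assembly needs. The contexts are therefore over-allocated by
 * AESNI_ALIGN - 1 bytes (see the cra_ctxsize initialisers below) and
 * the two helpers above round the raw pointer up to the next aligned
 * address.
 */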
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}
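/*
 * The irq_fpu_usable() test above is the pattern used throughout this
 * file: the AES-NI instructions touch XMM state, so they may only run
 * between kernel_fpu_begin()/kernel_fpu_end(). When the FPU cannot be
 * claimed (e.g. in a context where it is already in use), the generic
 * C implementation is used instead.
 */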
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}
static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
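/*
 * The walk loop above is the pattern shared by all the blkcipher
 * modes in this file: each iteration processes the full blocks of one
 * contiguous chunk (nbytes & AES_BLOCK_MASK), and the remainder is
 * handed back to blkcipher_walk_done() so the walk can continue into
 * the next scatterlist element.
 */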
static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct blkcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
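/*
 * ctr_crypt_final() handles a trailing partial block: CTR mode is a
 * stream cipher, so the last counter block is encrypted into a
 * keystream buffer and only nbytes of it are XORed with the source.
 * The counter is still advanced to keep the IV state consistent.
 */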
#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * based on key length, override with the by8 version
         * of ctr mode encryption/decryption for improved performance
         * aes_set_key_common() ensures that key length is one of
         * {128,192,256}
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif
static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                  nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }
        kernel_fpu_end();

        return err;
}
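/*
 * ctr_crypt() splits the request into a bulk pass over whole blocks
 * (via aesni_ctr_enc_tfm) and, if walk.nbytes is still non-zero, a
 * final partial block handled by ctr_crypt_final() above.
 */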
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_dec(ctx, blks, blks, nbytes);
}
static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
                                 keylen - AES_BLOCK_SIZE);
        if (err)
                return err;

        return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = xts_check_key(tfm, key, keylen);
        if (err)
                return err;

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
                                  keylen / 2);
}
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};
static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};
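/*
 * The two glue contexts above give the XTS glue code a choice of
 * routine per remaining chunk: the 8-block assembly path where at
 * least eight blocks are left, and the one-block path otherwise.
 * fpu_blocks_limit = 1 asks the glue code to take the FPU section
 * even for a single block.
 */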
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}
#else

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
#endif

#ifdef CONFIG_X86_64

static int rfc4106_init(struct crypto_aead *aead)
{
        struct cryptd_aead *cryptd_tfm;
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
                                       CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        *ctx = cryptd_tfm;
        crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
        return 0;
}
static void rfc4106_exit(struct crypto_aead *aead)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_free_aead(*ctx);
}
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_cipher *tfm;
        int ret;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_cipher_setkey(tfm, key, key_len);
        if (ret)
                goto out_free_cipher;

        /* Clear the data in the hash sub key container to zero.*/
        /* We want to cipher all zeros to create the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
        crypto_free_cipher(tfm);
        return ret;
}
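/*
 * As the comments above note, the GHASH subkey H is defined in GCM as
 * the encryption of the all-zero block under the AES key, which is
 * why the container is zeroed and then encrypted in place.
 */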
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

        if (key_len < 4) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /*Account for 4 byte nonce at the end.*/
        key_len -= 4;

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
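/*
 * RFC 4106 key material is the AES key followed by a 4 byte salt, so
 * for example a 20 byte blob carries a 16 byte AES key plus the salt
 * that becomes ctx->nonce and later bytes 0..3 of the j0 block.
 */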
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
static int helper_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length equal */
        /* to 16 or 20 bytes */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) && req->dst->length &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!assoc))
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
                          ctx->hash_subkey, assoc, req->assoclen - 8,
                          dst + req->cryptlen, auth_tag_len);
        kernel_fpu_end();

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         req->cryptlen + auth_tag_len, 1);
                kfree(assoc);
        }
        return 0;
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        u8 authTag[16];
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};
        unsigned int i;

        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length */
        /* equal to 16 or 20 bytes */

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!assoc)
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                          ctx->hash_subkey, assoc, req->assoclen - 8,
                          authTag, auth_tag_len);
        kernel_fpu_end();

        /* Compare generated tag with passed in tag. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         tempCipherLen, 1);
                kfree(assoc);
        }
        return retval;
}
static int rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_encrypt(req);
}
static int rfc4106_decrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_decrypt(req);
}
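/*
 * The two wrappers above normally forward to the cryptd queue, but
 * when the FPU is usable and nothing is already queued (so ordering
 * is preserved) they call the inner "__driver-gcm-aes-aesni" child
 * directly and avoid the round trip through the workqueue.
 */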
#endif

static struct crypto_alg aesni_algs[] = { {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
}, {
        .cra_name               = "__aes-aesni",
        .cra_driver_name        = "__driver-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
}, {
        .cra_name               = "__ecb-aes-aesni",
        .cra_driver_name        = "__driver-ecb-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
}, {
        .cra_name               = "__cbc-aes-aesni",
        .cra_driver_name        = "__driver-cbc-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
}, {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#ifdef CONFIG_X86_64
}, {
        .cra_name               = "__ctr-aes-aesni",
        .cra_driver_name        = "__driver-ctr-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
                },
        },
}, {
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_encrypt,
                },
        },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
        .cra_name               = "pcbc(aes)",
        .cra_driver_name        = "pcbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_pcbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#endif
}, {
        .cra_name               = "__lrw-aes-aesni",
        .cra_driver_name        = "__driver-lrw-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_exit               = lrw_aesni_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = lrw_aesni_setkey,
                        .encrypt        = lrw_encrypt,
                        .decrypt        = lrw_decrypt,
                },
        },
}, {
        .cra_name               = "__xts-aes-aesni",
        .cra_driver_name        = "__driver-xts-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = xts_aesni_setkey,
                        .encrypt        = xts_encrypt,
                        .decrypt        = xts_decrypt,
                },
        },
}, {
        .cra_name               = "lrw(aes)",
        .cra_driver_name        = "lrw-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
} };
#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm-aes-aesni",
                .cra_driver_name        = "__driver-gcm-aes-aesni",
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        .init                   = rfc4106_init,
        .exit                   = rfc4106_exit,
        .setkey                 = rfc4106_set_key,
        .setauthsize            = rfc4106_set_authsize,
        .encrypt                = rfc4106_encrypt,
        .decrypt                = rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct cryptd_aead *),
                .cra_module             = THIS_MODULE,
        },
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif
static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_fpu_init();
        if (err)
                return err;

        err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
        if (err)
                goto fpu_exit;

        err = crypto_register_aeads(aesni_aead_algs,
                                    ARRAY_SIZE(aesni_aead_algs));
        if (err)
                goto unregister_algs;

        return 0;

unregister_algs:
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
        crypto_fpu_exit();
        return err;
}
static void __exit aesni_exit(void)
{
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");