/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <asm/crypto/glue_helper.h>
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16];
        struct crypto_aes_ctx aes_key_expanded;
        u8 nonce[4];
        struct cryptd_aead *cryptd_tfm;
};
struct aesni_gcm_set_hash_subkey_result {
        int err;
        struct completion completion;
};
struct aesni_hash_subkey_req_data {
        u8 iv[16];
        struct aesni_gcm_set_hash_subkey_result result;
        struct scatterlist sg;
};
#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
struct aesni_lrw_ctx {
        struct lrw_table_ctx lrw_table;
        u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                                 const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
                                 const u8 *in, bool enc, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                              aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        }
}
static void aesni_gcm_dec_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
                              aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        }
}
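/*
 * Dispatch note: the AVX gen2 routines only pay off for sufficiently large
 * buffers and only handle 128-bit keys, so anything shorter than
 * AVX_GEN2_OPTSIZE bytes (or using a 192/256-bit key) falls back to the
 * plain AES-NI/SSE aesni_gcm_enc()/aesni_gcm_dec() path above.
 */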
#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                              aad_len, auth_tag, auth_tag_len);
        } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        }
}
static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                              aad, aad_len, auth_tag, auth_tag_len);
        } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
                                       aad_len, auth_tag, auth_tag_len);
        }
}
#endif
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
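/*
 * aesni_gcm_enc_tfm/aesni_gcm_dec_tfm are bound once in aesni_init() to the
 * best variant the CPU supports (SSE, AVX gen2 or AVX2 gen4); the RFC4106
 * code below always calls through these pointers instead of picking a
 * variant itself.
 */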
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        return (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
                          AESNI_ALIGN);
}
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
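/*
 * The raw_*_ctx buffers above are over-allocated by AESNI_ALIGN - 1 bytes;
 * aes_ctx() rounds the pointer up at run time so the key schedule handed to
 * the assembly routines is 16-byte aligned even when the crypto API only
 * guarantees a smaller ctx alignment. Illustration (not driver code):
 *
 *	u8 raw[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
 *	struct crypto_aes_ctx *ctx = aes_ctx(raw);	// 16-byte aligned
 */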
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}
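/*
 * irq_fpu_usable() pattern: when the FPU/SSE state cannot be touched in the
 * current context (for example in an interrupt that preempted a kernel FPU
 * section), the generic C implementation (crypto_aes_*) is used instead of
 * the AES-NI instructions, which operate on the XMM registers.
 */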
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}
static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct blkcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
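/*
 * ctr_crypt_final() handles the trailing partial block: the counter block is
 * encrypted into a keystream buffer and only the remaining nbytes (< 16) are
 * XORed into the output, so nothing is ever written past the request length.
 */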
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                                  const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * based on key length, override with the by8 version
         * of ctr mode encryption/decryption for improved performance
         * aes_set_key_common() ensures that key length is one of
         * {128,192,256}
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                  nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }
        kernel_fpu_end();

        return err;
}
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_enc(ctx, blks, blks, nbytes);
}
static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_dec(ctx, blks, blks, nbytes);
}
static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
                                 keylen - AES_BLOCK_SIZE);
        if (err)
                return err;

        return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        /* key consists of keys of equal size concatenated, therefore
         * the length must be even
         */
        if (keylen % 2) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
                                  keylen / 2);
}
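/*
 * XTS setkey sketch: the supplied xts(aes) key is two independent AES keys
 * back to back, e.g. for a 64-byte key (illustrative only):
 *
 *	aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, 32);	    // data key
 *	aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + 32, 32);  // tweak key
 */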
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}
static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}
static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}
static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};
static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}
#else

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
*desc
, struct scatterlist
*dst
,
766 struct scatterlist
*src
, unsigned int nbytes
)
768 struct aesni_xts_ctx
*ctx
= crypto_blkcipher_ctx(desc
->tfm
);
770 struct xts_crypt_req req
= {
772 .tbuflen
= sizeof(buf
),
774 .tweak_ctx
= aes_ctx(ctx
->raw_tweak_ctx
),
775 .tweak_fn
= aesni_xts_tweak
,
776 .crypt_ctx
= aes_ctx(ctx
->raw_crypt_ctx
),
777 .crypt_fn
= lrw_xts_decrypt_callback
,
781 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
784 ret
= xts_crypt(desc
, dst
, src
, nbytes
, &req
);
static int rfc4106_init(struct crypto_tfm *tfm)
{
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        struct crypto_aead *cryptd_child;
        struct aesni_rfc4106_gcm_ctx *child_ctx;

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
                                       CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        cryptd_child = cryptd_aead_child(cryptd_tfm);
        child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
        memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
        return 0;
}
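/*
 * rfc4106_init() wires the outer, async "rfc4106(gcm(aes))" instance to an
 * internal "__driver-gcm-aes-aesni" worker via cryptd. The request context
 * is sized so rfc4106_encrypt()/rfc4106_decrypt() can stash a complete
 * aead_request in it and bounce the operation to cryptd whenever the FPU is
 * not usable in the calling context.
 */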
static void rfc4106_exit(struct crypto_tfm *tfm)
{
        struct aesni_rfc4106_gcm_ctx *ctx =
                (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        if (!IS_ERR(ctx->cryptd_tfm))
                cryptd_free_aead(ctx->cryptd_tfm);
}
static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
        struct aesni_gcm_set_hash_subkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;
        result->err = err;
        complete(&result->completion);
}
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_ablkcipher *ctr_tfm;
        struct ablkcipher_request *req;
        int ret = -EINVAL;
        struct aesni_hash_subkey_req_data *req_data;

        ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
        if (IS_ERR(ctr_tfm))
                return PTR_ERR(ctr_tfm);

        crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

        ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
        if (ret)
                goto out_free_ablkcipher;

        ret = -ENOMEM;
        req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
        if (!req)
                goto out_free_ablkcipher;

        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data)
                goto out_free_request;

        memset(req_data->iv, 0, sizeof(req_data->iv));

        /* Clear the data in the hash sub key container to zero. */
        /* We want to cipher all zeros to create the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        init_completion(&req_data->result.completion);
        sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
        ablkcipher_request_set_tfm(req, ctr_tfm);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        rfc4106_set_hash_subkey_done,
                                        &req_data->result);

        ablkcipher_request_set_crypt(req, &req_data->sg,
                &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                ret = wait_for_completion_interruptible
                        (&req_data->result.completion);
                if (!ret)
                        ret = req_data->result.err;
        }
        kfree(req_data);
out_free_request:
        ablkcipher_request_free(req);
out_free_ablkcipher:
        crypto_free_ablkcipher(ctr_tfm);
        return ret;
}
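/*
 * In GCM terms this computes the GHASH subkey H = E_K(0^128): a one-block
 * ctr(aes) encryption of an all-zero buffer with an all-zero counter block
 * leaves exactly the AES encryption of the zero block in hash_subkey.
 */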
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(aead);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
        u8 *new_key_align, *new_key_mem = NULL;

        if (key_len < 4) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for the 4 byte nonce at the end. */
        key_len -= 4;
        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
                return -EINVAL;

        if ((unsigned long)key % AESNI_ALIGN) {
                /* key is not aligned: use an auxiliary aligned pointer */
                new_key_mem = kmalloc(key_len + AESNI_ALIGN, GFP_KERNEL);
                if (!new_key_mem)
                        return -ENOMEM;

                new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
                memcpy(new_key_align, key, key_len);
                key = new_key_align;
        }

        if (!irq_fpu_usable())
                ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
                                            key, key_len);
        else {
                kernel_fpu_begin();
                ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
                kernel_fpu_end();
        }
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
                ret = -EINVAL;
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
exit:
        kfree(new_key_mem);
        return ret;
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
        struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child);
        struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm;
        int ret;

        ret = crypto_aead_setkey(child, key, key_len);
        if (!ret) {
                memcpy(ctx, c_ctx, sizeof(*ctx));
                ctx->cryptd_tfm = cryptd_tfm;
        }
        return ret;
}
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }
        crypto_aead_crt(aead)->authsize = authsize;
        return 0;
}
/* This is the Integrity Check Value (aka the authentication tag) length and
 * can be 8, 12 or 16 bytes long.
 */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
        int ret;

        ret = crypto_aead_setauthsize(child, authsize);
        if (!ret)
                crypto_aead_crt(parent)->authsize = authsize;
        return ret;
}
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        u32 key_len = ctx->aes_key_expanded.key_length;
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_tab[16 + AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need to have the AAD length equal
         * to 8 or 12 bytes.
         */
        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;
        if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
                return -EINVAL;
        if (unlikely(key_len != AES_KEYSIZE_128 &&
                     key_len != AES_KEYSIZE_192 &&
                     key_len != AES_KEYSIZE_256))
                return -EINVAL;

        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv + i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv + 4 + i) = req->iv[i];
        *((__be32 *)(iv + 12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                              GFP_ATOMIC);
                if (unlikely(!src))
                        return -ENOMEM;
                assoc = (src + req->cryptlen + auth_tag_len);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                         req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
                + ((unsigned long)req->cryptlen), auth_tag_len);

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet.
         */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0,
                                         req->cryptlen + auth_tag_len, 1);
                kfree(src);
        }
        return 0;
}
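/*
 * Two data paths above: when src, dst and assoc each consist of a single
 * scatterlist entry, the pages are mapped and the GCM routine works on them
 * in place; otherwise everything is linearized into a temporary GFP_ATOMIC
 * buffer first and the ciphertext plus tag is copied back afterwards.
 */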
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        u32 key_len = ctx->aes_key_expanded.key_length;
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_and_authTag[32 + AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
        u8 *authTag = iv + 16;
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        if (unlikely((req->cryptlen < auth_tag_len) ||
                     (req->assoclen != 8 && req->assoclen != 12)))
                return -EINVAL;
        if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
                return -EINVAL;
        if (unlikely(key_len != AES_KEYSIZE_128 &&
                     key_len != AES_KEYSIZE_192 &&
                     key_len != AES_KEYSIZE_256))
                return -EINVAL;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need to have the AAD length
         * equal to 8 or 12 bytes.
         */
        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv + i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv + 4 + i) = req->iv[i];
        *((__be32 *)(iv + 12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!src)
                        return -ENOMEM;
                assoc = (src + req->cryptlen);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                         req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
                authTag, auth_tag_len);

        /* Compare generated tag with passed in tag. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
                kfree(src);
        }
        return retval;
}
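/*
 * The tag received with the ciphertext is compared against the one
 * recomputed by aesni_gcm_dec_tfm() using crypto_memneq(), which avoids
 * data-dependent timing, and -EBADMSG is returned on mismatch.
 */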
static int rfc4106_encrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);

                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                ret = crypto_aead_encrypt(cryptd_req);
        } else {
                kernel_fpu_begin();
                ret = __driver_rfc4106_encrypt(req);
                kernel_fpu_end();
        }
        return ret;
}
static int rfc4106_decrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);

                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                ret = crypto_aead_decrypt(cryptd_req);
        } else {
                kernel_fpu_begin();
                ret = __driver_rfc4106_decrypt(req);
                kernel_fpu_end();
        }
        return ret;
}
static int helper_rfc4106_encrypt(struct aead_request *req)
{
        int ret;

        if (unlikely(!irq_fpu_usable())) {
                WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
                ret = -EINVAL;
        } else {
                kernel_fpu_begin();
                ret = __driver_rfc4106_encrypt(req);
                kernel_fpu_end();
        }
        return ret;
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
        int ret;

        if (unlikely(!irq_fpu_usable())) {
                WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
                ret = -EINVAL;
        } else {
                kernel_fpu_begin();
                ret = __driver_rfc4106_decrypt(req);
                kernel_fpu_end();
        }
        return ret;
}
static struct crypto_alg aesni_algs[] = { {
        .cra_name = "aes",
        .cra_driver_name = "aes-aesni",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = aes_set_key,
                        .cia_encrypt = aes_encrypt,
                        .cia_decrypt = aes_decrypt
                }
        }
= "__aes-aesni",
1265 .cra_driver_name
= "__driver-aes-aesni",
1267 .cra_flags
= CRYPTO_ALG_TYPE_CIPHER
| CRYPTO_ALG_INTERNAL
,
1268 .cra_blocksize
= AES_BLOCK_SIZE
,
1269 .cra_ctxsize
= sizeof(struct crypto_aes_ctx
) +
1272 .cra_module
= THIS_MODULE
,
1275 .cia_min_keysize
= AES_MIN_KEY_SIZE
,
1276 .cia_max_keysize
= AES_MAX_KEY_SIZE
,
1277 .cia_setkey
= aes_set_key
,
1278 .cia_encrypt
= __aes_encrypt
,
1279 .cia_decrypt
= __aes_decrypt
}, {
        .cra_name = "__ecb-aes-aesni",
        .cra_driver_name = "__driver-ecb-aes-aesni",
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_INTERNAL,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = aes_set_key,
                        .encrypt = ecb_encrypt,
                        .decrypt = ecb_decrypt,
                },
        },
}, {
        .cra_name = "__cbc-aes-aesni",
        .cra_driver_name = "__driver-cbc-aes-aesni",
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_INTERNAL,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = aes_set_key,
                        .encrypt = cbc_encrypt,
                        .decrypt = cbc_decrypt,
                },
        },
}, {
        .cra_name = "ecb(aes)",
        .cra_driver_name = "ecb-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_ecb_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
}, {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "cbc-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_cbc_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
#ifdef CONFIG_X86_64
}, {
        .cra_name = "__ctr-aes-aesni",
        .cra_driver_name = "__driver-ctr-aes-aesni",
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_INTERNAL,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = aes_set_key,
                        .encrypt = ctr_crypt,
                        .decrypt = ctr_crypt,
                },
        },
}, {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "ctr-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_ctr_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_encrypt,
                },
        },
}, {
        .cra_name = "__gcm-aes-aesni",
        .cra_driver_name = "__driver-gcm-aes-aesni",
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_INTERNAL,
        .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
                       AESNI_ALIGN,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .aead = {
                        .setkey = common_rfc4106_set_key,
                        .setauthsize = common_rfc4106_set_authsize,
                        .encrypt = helper_rfc4106_encrypt,
                        .decrypt = helper_rfc4106_decrypt,
                        .ivsize = 8,
                        .maxauthsize = 16,
                },
        },
}, {
        .cra_name = "rfc4106(gcm(aes))",
        .cra_driver_name = "rfc4106-gcm-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
                       AESNI_ALIGN,
        .cra_type = &crypto_nivaead_type,
        .cra_module = THIS_MODULE,
        .cra_init = rfc4106_init,
        .cra_exit = rfc4106_exit,
        .cra_u = {
                .aead = {
                        .setkey = rfc4106_set_key,
                        .setauthsize = rfc4106_set_authsize,
                        .encrypt = rfc4106_encrypt,
                        .decrypt = rfc4106_decrypt,
                        .ivsize = 8,
                        .maxauthsize = 16,
                },
        },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
        .cra_name = "pcbc(aes)",
        .cra_driver_name = "pcbc-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_pcbc_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
#endif
}, {
        .cra_name = "__lrw-aes-aesni",
        .cra_driver_name = "__driver-lrw-aes-aesni",
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_INTERNAL,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct aesni_lrw_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_exit = lrw_aesni_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = lrw_aesni_setkey,
                        .encrypt = lrw_encrypt,
                        .decrypt = lrw_decrypt,
                },
        },
}, {
        .cra_name = "__xts-aes-aesni",
        .cra_driver_name = "__driver-xts-aes-aesni",
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_INTERNAL,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct aesni_xts_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = xts_aesni_setkey,
                        .encrypt = xts_encrypt,
                        .decrypt = xts_decrypt,
                },
        },
}, {
        .cra_name = "lrw(aes)",
        .cra_driver_name = "lrw-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
}, {
        .cra_name = "xts(aes)",
        .cra_driver_name = "xts-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
} };
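/*
 * Registration note: the accelerated algorithms are registered with
 * cra_priority 300/400 so the crypto core prefers them over the
 * lower-priority generic software implementations when a user asks for
 * "aes", "cbc(aes)", "xts(aes)", and so on.
 */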
static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_fpu_init();
        if (err)
                return err;

        return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}
static void __exit aesni_exit(void)
{
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}
module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");