2 * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
4 * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
#include <asm/neon.h>
#include <crypto/aes.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

#include "aes-ce-setkey.h"
19 MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
20 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
21 MODULE_LICENSE("GPL v2");
27 static int num_rounds(struct crypto_aes_ctx
*ctx
)
30 * # of rounds specified by AES:
31 * 128 bit key 10 rounds
32 * 192 bit key 12 rounds
33 * 256 bit key 14 rounds
34 * => n byte key => 6 + (n/4) rounds
36 return 6 + ctx
->key_length
/ 4;
39 static void aes_cipher_encrypt(struct crypto_tfm
*tfm
, u8 dst
[], u8
const src
[])
41 struct crypto_aes_ctx
*ctx
= crypto_tfm_ctx(tfm
);
42 struct aes_block
*out
= (struct aes_block
*)dst
;
43 struct aes_block
const *in
= (struct aes_block
*)src
;
47 kernel_neon_begin_partial(4);
49 __asm__(" ld1 {v0.16b}, %[in] ;"
50 " ld1 {v1.2d}, [%[key]], #16 ;"
51 " cmp %w[rounds], #10 ;"
54 " mov v3.16b, v1.16b ;"
56 "0: mov v2.16b, v1.16b ;"
57 " ld1 {v3.2d}, [%[key]], #16 ;"
58 "1: aese v0.16b, v2.16b ;"
59 " aesmc v0.16b, v0.16b ;"
60 "2: ld1 {v1.2d}, [%[key]], #16 ;"
61 " aese v0.16b, v3.16b ;"
62 " aesmc v0.16b, v0.16b ;"
63 "3: ld1 {v2.2d}, [%[key]], #16 ;"
64 " subs %w[rounds], %w[rounds], #3 ;"
65 " aese v0.16b, v1.16b ;"
66 " aesmc v0.16b, v0.16b ;"
67 " ld1 {v3.2d}, [%[key]], #16 ;"
69 " aese v0.16b, v2.16b ;"
70 " eor v0.16b, v0.16b, v3.16b ;"
71 " st1 {v0.16b}, %[out] ;"
78 "2"(num_rounds(ctx
) - 2)
84 static void aes_cipher_decrypt(struct crypto_tfm
*tfm
, u8 dst
[], u8
const src
[])
86 struct crypto_aes_ctx
*ctx
= crypto_tfm_ctx(tfm
);
87 struct aes_block
*out
= (struct aes_block
*)dst
;
88 struct aes_block
const *in
= (struct aes_block
*)src
;
92 kernel_neon_begin_partial(4);
94 __asm__(" ld1 {v0.16b}, %[in] ;"
95 " ld1 {v1.2d}, [%[key]], #16 ;"
96 " cmp %w[rounds], #10 ;"
99 " mov v3.16b, v1.16b ;"
101 "0: mov v2.16b, v1.16b ;"
102 " ld1 {v3.2d}, [%[key]], #16 ;"
103 "1: aesd v0.16b, v2.16b ;"
104 " aesimc v0.16b, v0.16b ;"
105 "2: ld1 {v1.2d}, [%[key]], #16 ;"
106 " aesd v0.16b, v3.16b ;"
107 " aesimc v0.16b, v0.16b ;"
108 "3: ld1 {v2.2d}, [%[key]], #16 ;"
109 " subs %w[rounds], %w[rounds], #3 ;"
110 " aesd v0.16b, v1.16b ;"
111 " aesimc v0.16b, v0.16b ;"
112 " ld1 {v3.2d}, [%[key]], #16 ;"
114 " aesd v0.16b, v2.16b ;"
115 " eor v0.16b, v0.16b, v3.16b ;"
116 " st1 {v0.16b}, %[out] ;"
120 [rounds
] "=r"(dummy1
)
123 "2"(num_rounds(ctx
) - 2)
130 * aes_sub() - use the aese instruction to perform the AES sbox substitution
131 * on each byte in 'input'
133 static u32
aes_sub(u32 input
)
137 __asm__("dup v1.4s, %w[in] ;"
139 "aese v0.16b, v1.16b ;"
140 "umov %w[out], v0.4s[0] ;"
149 int ce_aes_expandkey(struct crypto_aes_ctx
*ctx
, const u8
*in_key
,
150 unsigned int key_len
)
153 * The AES key schedule round constants
155 static u8
const rcon
[] = {
156 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
159 u32 kwords
= key_len
/ sizeof(u32
);
160 struct aes_block
*key_enc
, *key_dec
;
163 if (key_len
!= AES_KEYSIZE_128
&&
164 key_len
!= AES_KEYSIZE_192
&&
165 key_len
!= AES_KEYSIZE_256
)
168 memcpy(ctx
->key_enc
, in_key
, key_len
);
169 ctx
->key_length
= key_len
;
171 kernel_neon_begin_partial(2);
172 for (i
= 0; i
< sizeof(rcon
); i
++) {
173 u32
*rki
= ctx
->key_enc
+ (i
* kwords
);
174 u32
*rko
= rki
+ kwords
;
176 rko
[0] = ror32(aes_sub(rki
[kwords
- 1]), 8) ^ rcon
[i
] ^ rki
[0];
177 rko
[1] = rko
[0] ^ rki
[1];
178 rko
[2] = rko
[1] ^ rki
[2];
179 rko
[3] = rko
[2] ^ rki
[3];
181 if (key_len
== AES_KEYSIZE_192
) {
184 rko
[4] = rko
[3] ^ rki
[4];
185 rko
[5] = rko
[4] ^ rki
[5];
186 } else if (key_len
== AES_KEYSIZE_256
) {
189 rko
[4] = aes_sub(rko
[3]) ^ rki
[4];
190 rko
[5] = rko
[4] ^ rki
[5];
191 rko
[6] = rko
[5] ^ rki
[6];
192 rko
[7] = rko
[6] ^ rki
[7];
197 * Generate the decryption keys for the Equivalent Inverse Cipher.
198 * This involves reversing the order of the round keys, and applying
199 * the Inverse Mix Columns transformation on all but the first and
202 key_enc
= (struct aes_block
*)ctx
->key_enc
;
203 key_dec
= (struct aes_block
*)ctx
->key_dec
;
206 key_dec
[0] = key_enc
[j
];
207 for (i
= 1, j
--; j
> 0; i
++, j
--)
208 __asm__("ld1 {v0.16b}, %[in] ;"
209 "aesimc v1.16b, v0.16b ;"
210 "st1 {v1.16b}, %[out] ;"
212 : [out
] "=Q"(key_dec
[i
])
213 : [in
] "Q"(key_enc
[j
])
215 key_dec
[i
] = key_enc
[0];
220 EXPORT_SYMBOL(ce_aes_expandkey
);
222 int ce_aes_setkey(struct crypto_tfm
*tfm
, const u8
*in_key
,
223 unsigned int key_len
)
225 struct crypto_aes_ctx
*ctx
= crypto_tfm_ctx(tfm
);
228 ret
= ce_aes_expandkey(ctx
, in_key
, key_len
);
232 tfm
->crt_flags
|= CRYPTO_TFM_RES_BAD_KEY_LEN
;
235 EXPORT_SYMBOL(ce_aes_setkey
);
237 static struct crypto_alg aes_alg
= {
239 .cra_driver_name
= "aes-ce",
241 .cra_flags
= CRYPTO_ALG_TYPE_CIPHER
,
242 .cra_blocksize
= AES_BLOCK_SIZE
,
243 .cra_ctxsize
= sizeof(struct crypto_aes_ctx
),
244 .cra_module
= THIS_MODULE
,
246 .cia_min_keysize
= AES_MIN_KEY_SIZE
,
247 .cia_max_keysize
= AES_MAX_KEY_SIZE
,
248 .cia_setkey
= ce_aes_setkey
,
249 .cia_encrypt
= aes_cipher_encrypt
,
250 .cia_decrypt
= aes_cipher_decrypt
254 static int __init
aes_mod_init(void)
256 return crypto_register_alg(&aes_alg
);
259 static void __exit
aes_mod_exit(void)
261 crypto_unregister_alg(&aes_alg
);
264 module_cpu_feature_match(AES
, aes_mod_init
);
265 module_exit(aes_mod_exit
);