// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

#include "aes-ce-setkey.h"

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}
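
/*
 * For illustration: a 16-byte (128-bit) key gives 6 + 16/4 = 10 rounds, a
 * 24-byte key gives 12 and a 32-byte key gives 14, matching the table above.
 * key_length is the key size in bytes, as recorded by ce_aes_expandkey().
 */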

asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
				     u32 *macp, u32 const rk[], u32 rounds);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
				 u32 rounds);

asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
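
/*
 * Note: the ce_aes_ccm_* routines above are NEON/Crypto Extensions helpers
 * implemented in assembly (aes-ce-ccm-core.S in this directory), while
 * __aes_arm64_encrypt is the generic scalar AES block encrypt used by the
 * non-SIMD fallback paths below.
 */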

static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	ret = ce_aes_expandkey(ctx, in_key, key_len);
	if (!ret)
		return 0;

	tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
	return -EINVAL;
}

static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	if ((authsize & 1) || authsize < 4)
		return -EINVAL;
	return 0;
}
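
/*
 * CCM only defines even tag lengths between 4 and 16 bytes. The check above
 * rejects odd and too-small values; the upper bound is enforced separately
 * by the AEAD core against the .maxauthsize (AES_BLOCK_SIZE) declared below.
 */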

static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
	u32 l = req->iv[0] + 1;

	/* verify that CCM dimension 'L' is set correctly in the IV */
	if (l < 2 || l > 8)
		return -EINVAL;

	/* verify that msglen can in fact be represented in L bytes */
	if (l < 4 && msglen >> (8 * l))
		return -EOVERFLOW;

	/*
	 * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
	 * uses a u32 type to represent msglen so the top 4 bytes are always 0.
	 */
	n[0] = 0;
	n[1] = cpu_to_be32(msglen);

	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);

	/*
	 * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
	 *		  (already set by caller)
	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
	 * - bit 6	: indicates presence of authenticate-only data
	 */
	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
	if (req->assoclen)
		maciv[0] |= 0x40;

	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
	return 0;
}
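
/*
 * Worked example (illustrative): with a 13-byte nonce the caller sets
 * iv[0] = L - 1 = 2 - 1 = 0x01. For an 8-byte auth tag and non-empty AAD,
 * the flags byte becomes 0x01 | ((8 - 2) << 2) | 0x40 = 0x59, and the last
 * two bytes of the B0 block carry the big-endian message length.
 */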

static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			   u32 abytes, u32 *macp)
{
	if (crypto_simd_usable()) {
		kernel_neon_begin();
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
				     num_rounds(key));
		kernel_neon_end();
	} else {
		if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
			int added = min(abytes, AES_BLOCK_SIZE - *macp);

			crypto_xor(&mac[*macp], in, added);

			*macp += added;
			in += added;
			abytes -= added;
		}

		while (abytes >= AES_BLOCK_SIZE) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, AES_BLOCK_SIZE);

			in += AES_BLOCK_SIZE;
			abytes -= AES_BLOCK_SIZE;
		}

		if (abytes > 0) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, abytes);
			*macp = abytes;
		}
	}
}
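
/*
 * Note on the scalar fallback above: the CBC-MAC is kept in a "deferred"
 * form. Incoming bytes are only XORed into the buffer; the AES encryption
 * of a completed block happens lazily, just before the next block (or the
 * final CTR pass) needs the result. *macp tracks how many bytes of the
 * current block have been absorbed so far.
 */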

static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
	struct scatter_walk walk;
	u32 len = req->assoclen;
	u32 macp = 0;

	/* prepend the AAD with a length tag */
	if (len < 0xff00) {
		ltag.l = cpu_to_be16(len);
		ltag.len = 2;
	} else {
		ltag.l = cpu_to_be16(0xfffe);
		put_unaligned_be32(len, &ltag.h);
		ltag.len = 6;
	}

	ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp);
	scatterwalk_start(&walk, req->src);

	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);
		ccm_update_mac(ctx, mac, p, n, &macp);
		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);
}
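
/*
 * AAD length encoding, for reference: an assoclen of 24 bytes is prepended
 * as the two bytes 0x00 0x18, while an assoclen of 0x10000 is prepended as
 * 0xff 0xfe followed by the 32-bit big-endian length (six bytes in total),
 * per the a_len encoding in RFC 3610 / NIST SP 800-38C.
 */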

static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			      struct crypto_aes_ctx *ctx, bool enc)
{
	u8 buf[AES_BLOCK_SIZE];
	int err = 0;

	while (walk->nbytes) {
		int blocks = walk->nbytes / AES_BLOCK_SIZE;
		u32 tail = walk->nbytes % AES_BLOCK_SIZE;
		u8 *dst = walk->dst.virt.addr;
		u8 *src = walk->src.virt.addr;
		u32 nbytes = walk->nbytes;

		if (nbytes == walk->total && tail > 0) {
			blocks++;
			tail = 0;
		}

		do {
			u32 bsize = AES_BLOCK_SIZE;

			if (nbytes < AES_BLOCK_SIZE)
				bsize = nbytes;

			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			__aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
					    num_rounds(ctx));
			__aes_arm64_encrypt(ctx->key_enc, mac, mac,
					    num_rounds(ctx));
			if (enc)
				crypto_xor(mac, src, bsize);
			crypto_xor_cpy(dst, src, buf, bsize);
			if (!enc)
				crypto_xor(mac, dst, bsize);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		} while (--blocks);

		err = skcipher_walk_done(walk, tail);
	}

	if (!err) {
		__aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
		__aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
		crypto_xor(mac, buf, AES_BLOCK_SIZE);
	}
	return err;
}
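
/*
 * The final step above computes the CCM tag: iv0 is the original counter
 * block with its counter field zeroed (A_0), so E(K, A_0) = S_0, and the
 * tag is the (deferred) CBC-MAC XORed with S_0, as CCM specifies. The NEON
 * path performs the same step in ce_aes_ccm_final().
 */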

static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
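
/*
 * Illustrative (not part of this driver): a kernel user reaches this code
 * through the generic AEAD API rather than calling ccm_encrypt() directly.
 * A minimal sketch, with error handling omitted and the buffer laid out as
 * AAD || plaintext || room for the tag:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	struct aead_request *req;
 *	struct scatterlist sg;
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 8);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, assoclen + ptlen + 8);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 * where iv is a 16-byte buffer with iv[0] = L - 1 and the nonce in the
 * following 15 - L bytes, matching the layout expected by ccm_init_mac().
 */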

static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen - authsize;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_decrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_decrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
	}
	if (err)
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(buf, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	if (crypto_memneq(mac, buf, authsize))
		return -EBADMSG;
	return 0;
}
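
/*
 * On the decrypt side req->cryptlen covers ciphertext plus tag, so the
 * payload length passed to ccm_init_mac() excludes authsize and the stored
 * tag is read back from the end of req->src. crypto_memneq() is used so the
 * comparison runs in constant time.
 */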

static struct aead_alg ccm_aes_alg = {
	.base = {
		.cra_name		= "ccm(aes)",
		.cra_driver_name	= "ccm-aes-ce",
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= ccm_setkey,
	.setauthsize	= ccm_setauthsize,
	.encrypt	= ccm_encrypt,
	.decrypt	= ccm_decrypt,
};

static int __init aes_mod_init(void)
{
	if (!cpu_have_named_feature(AES))
		return -ENODEV;
	return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
	crypto_unregister_aead(&ccm_aes_alg);
}

module_init(aes_mod_init);
module_exit(aes_mod_exit);

MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");