arch/arm64/crypto/aes-ce-ccm-glue.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

#include "aes-ce-setkey.h"

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}
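
/*
 * Low-level CCM primitives, implemented in assembly in aes-ce-ccm-core.S:
 * CBC-MAC accumulation over the authenticated data, the fused CTR+CBC-MAC
 * encrypt/decrypt passes, and the final tag computation.
 */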
asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
				     u32 *macp, u32 const rk[], u32 rounds);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
				 u32 rounds);

static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);

	return ce_aes_expandkey(ctx, in_key, key_len);
}
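
/*
 * RFC 3610 permits even tag sizes from 4 to 16 bytes; the upper bound is
 * enforced by the crypto core via .maxauthsize below.
 */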
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	if ((authsize & 1) || authsize < 4)
		return -EINVAL;
	return 0;
}
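
/*
 * Build the B_0 block (flags byte, nonce, encoded message length) that seeds
 * the CBC-MAC, and turn req->iv into the initial counter block A_0 by zeroing
 * its trailing L length bytes.
 */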
static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
	u32 l = req->iv[0] + 1;

	/* verify that CCM dimension 'L' is set correctly in the IV */
	if (l < 2 || l > 8)
		return -EINVAL;

	/* verify that msglen can in fact be represented in L bytes */
	if (l < 4 && msglen >> (8 * l))
		return -EOVERFLOW;

	/*
	 * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
	 * uses a u32 type to represent msglen so the top 4 bytes are always 0.
	 */
	n[0] = 0;
	n[1] = cpu_to_be32(msglen);

	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);

	/*
	 * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
	 *		  (already set by caller)
	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
	 * - bit 6	: indicates presence of authenticate-only data
	 */
	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
	if (req->assoclen)
		maciv[0] |= 0x40;

	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
	return 0;
}
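
/*
 * Fold abytes of data into the running CBC-MAC. Uses the NEON helper when
 * SIMD is usable; otherwise falls back to the generic scalar aes_encrypt(),
 * with *macp tracking how much of the current MAC block has been filled.
 */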
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			   u32 abytes, u32 *macp)
{
	if (crypto_simd_usable()) {
		kernel_neon_begin();
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
				     num_rounds(key));
		kernel_neon_end();
	} else {
		if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
			int added = min(abytes, AES_BLOCK_SIZE - *macp);

			crypto_xor(&mac[*macp], in, added);

			*macp += added;
			in += added;
			abytes -= added;
		}

		while (abytes >= AES_BLOCK_SIZE) {
			aes_encrypt(key, mac, mac);
			crypto_xor(mac, in, AES_BLOCK_SIZE);

			in += AES_BLOCK_SIZE;
			abytes -= AES_BLOCK_SIZE;
		}

		if (abytes > 0) {
			aes_encrypt(key, mac, mac);
			crypto_xor(mac, in, abytes);
			*macp = abytes;
		}
	}
}
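
/*
 * MAC the associated data. Per RFC 3610, the AAD is prepended with a length
 * tag: lengths below 0xff00 are encoded in two bytes, larger ones as the
 * 0xfffe marker followed by a four-byte length.
 */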
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
	struct scatter_walk walk;
	u32 len = req->assoclen;
	u32 macp = 0;

	/* prepend the AAD with a length tag */
	if (len < 0xff00) {
		ltag.l = cpu_to_be16(len);
		ltag.len = 2;
	} else {
		ltag.l = cpu_to_be16(0xfffe);
		put_unaligned_be32(len, &ltag.h);
		ltag.len = 6;
	}

	ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp);
	scatterwalk_start(&walk, req->src);

	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);
		ccm_update_mac(ctx, mac, p, n, &macp);
		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);
}
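
/*
 * Scalar CTR + CBC-MAC pass using the generic AES library, for when
 * crypto_simd_usable() returns false (e.g. in interrupt context). iv0 holds
 * the original counter block A_0, needed to encrypt the final tag.
 */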
static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			      struct crypto_aes_ctx *ctx, bool enc)
{
	u8 buf[AES_BLOCK_SIZE];
	int err = 0;

	while (walk->nbytes) {
		int blocks = walk->nbytes / AES_BLOCK_SIZE;
		u32 tail = walk->nbytes % AES_BLOCK_SIZE;
		u8 *dst = walk->dst.virt.addr;
		u8 *src = walk->src.virt.addr;
		u32 nbytes = walk->nbytes;

		if (nbytes == walk->total && tail > 0) {
			blocks++;
			tail = 0;
		}

		do {
			u32 bsize = AES_BLOCK_SIZE;

			if (nbytes < AES_BLOCK_SIZE)
				bsize = nbytes;

			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			aes_encrypt(ctx, buf, walk->iv);
			aes_encrypt(ctx, mac, mac);
			if (enc)
				crypto_xor(mac, src, bsize);
			crypto_xor_cpy(dst, src, buf, bsize);
			if (!enc)
				crypto_xor(mac, dst, bsize);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		} while (--blocks);

		err = skcipher_walk_done(walk, tail);
	}

	if (!err) {
		aes_encrypt(ctx, buf, iv0);
		aes_encrypt(ctx, mac, mac);
		crypto_xor(mac, buf, AES_BLOCK_SIZE);
	}
	return err;
}
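
/*
 * AEAD encrypt: MAC the associated data, encrypt and MAC the plaintext a
 * walk chunk at a time, then append the (possibly truncated) auth tag to
 * the destination scatterlist.
 */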
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
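
/*
 * AEAD decrypt: req->cryptlen covers ciphertext plus tag, so only
 * cryptlen - authsize bytes are decrypted before the computed tag is
 * compared in constant time against the one stored in the source.
 */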
static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen - authsize;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_decrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_decrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
	}

	if (err)
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(buf, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	if (crypto_memneq(mac, buf, authsize))
		return -EBADMSG;
	return 0;
}
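
/*
 * CCM is a CTR-based stream construction, hence cra_blocksize is 1 and the
 * walk granularity is advertised via .chunksize instead.
 */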
static struct aead_alg ccm_aes_alg = {
	.base = {
		.cra_name		= "ccm(aes)",
		.cra_driver_name	= "ccm-aes-ce",
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= ccm_setkey,
	.setauthsize	= ccm_setauthsize,
	.encrypt	= ccm_encrypt,
	.decrypt	= ccm_decrypt,
};
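
/* register only on CPUs that implement the AES Crypto Extensions */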
static int __init aes_mod_init(void)
{
	if (!cpu_have_named_feature(AES))
		return -ENODEV;
	return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
	crypto_unregister_aead(&ccm_aes_alg);
}

module_init(aes_mod_init);
module_exit(aes_mod_exit);

MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");
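
/*
 * Usage sketch (illustrative, not part of the original file): requesting this
 * transform through the kernel AEAD API. The 16-byte CCM IV convention
 * (iv[0] = L - 1, followed by a 15 - L byte nonce) follows from
 * ccm_init_mac() above; names like "key", "sg", "assoclen" and "ptlen" are
 * placeholders, and error handling is elided.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	u8 iv[AES_BLOCK_SIZE] = { 3 };	// L = 4, leaving an 11-byte nonce
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, sg, sg, ptlen, iv);	// in-place encrypt
 *	err = crypto_aead_encrypt(req);	// tag lands right after the payload
 *
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */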