// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ce-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd.
 * Copyright (C) 2024 Google LLC
 *
 * Author: Ard Biesheuvel <ardb@kernel.org>
 */

#include <asm/neon.h>
#include <linux/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

#include "aes-ce-setkey.h"

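/*
 * The skcipher walk API pulled in via <crypto/internal/skcipher.h> is
 * exported in the CRYPTO_INTERNAL symbol namespace, which this module
 * therefore needs to import explicitly before it may link against it.
 */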
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

static int num_rounds(struct crypto_aes_ctx *ctx)
{
        /*
         * # of rounds specified by AES:
         * 128 bit key          10 rounds
         * 192 bit key          12 rounds
         * 256 bit key          14 rounds
         * => n byte key        => 6 + (n/4) rounds
         */
        return 6 + ctx->key_length / 4;
}

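/*
 * For instance, an AES-128 key has key_length == 16, so the expression
 * above yields 6 + 16 / 4 == 10 rounds, matching the table.
 */
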
asmlinkage u32 ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
                                 int blocks, u8 dg[], int enc_before,
                                 int enc_after);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
                                   u32 const rk[], u32 rounds, u8 mac[],
                                   u8 ctr[], u8 const final_iv[]);

asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
                                   u32 const rk[], u32 rounds, u8 mac[],
                                   u8 ctr[], u8 const final_iv[]);

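/*
 * The three prototypes above are fulfilled by NEON/Crypto Extensions
 * assembly routines (in aes-ce-ccm-core.S); they may only be called
 * between kernel_neon_begin() and kernel_neon_end().
 */
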
static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
                      unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);

        return ce_aes_expandkey(ctx, in_key, key_len);
}

static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        if ((authsize & 1) || authsize < 4)
                return -EINVAL;
        return 0;
}

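/*
 * CCM (RFC 3610/NIST SP 800-38C) only defines even tag sizes from 4 to 16
 * bytes; odd or too-small values are rejected above, while the upper bound
 * is enforced through the .maxauthsize field of the aead_alg below.
 */
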
static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        __be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
        u32 l = req->iv[0] + 1;

        /* verify that CCM dimension 'L' is set correctly in the IV */
        if (l < 2 || l > 8)
                return -EINVAL;

        /* verify that msglen can in fact be represented in L bytes */
        if (l < 4 && msglen >> (8 * l))
                return -EOVERFLOW;

        /*
         * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
         * uses a u32 type to represent msglen so the top 4 bytes are always 0.
         */
        n[0] = 0;
        n[1] = cpu_to_be32(msglen);

        memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);

        /*
         * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
         * - bits 0..2  : max # of bytes required to represent msglen, minus 1
         *                (already set by caller)
         * - bits 3..5  : size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
         * - bit 6      : indicates presence of authenticate-only data
         */
        maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
        if (req->assoclen)
                maciv[0] |= 0x40;
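
        /*
         * Worked example: for authsize == 16 with AAD present and L == 2
         * (i.e. the caller stored L - 1 == 1 in bits 0..2 of req->iv[0]),
         * the two statements above produce 0x01 | ((16 - 2) << 2) | 0x40,
         * i.e. a flags byte of 0x79.
         */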

        memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
        return 0;
}

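/*
 * Feed @abytes bytes of input at @in into the CBC-MAC state in @mac, where
 * @macp is the number of bytes already buffered in the current MAC block.
 * Full blocks are handed to the ce_aes_mac_update() assembly routine
 * (briefly yielding the NEON unit if it returns early), while partial
 * blocks are simply XORed into the MAC buffer. Returns the updated offset
 * into the MAC block so callers can resume across scatterlist entries.
 */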
static u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
                                u32 macp, u32 const rk[], u32 rounds)
{
        int enc_after = (macp + abytes) % AES_BLOCK_SIZE;

        do {
                u32 blocks = abytes / AES_BLOCK_SIZE;

                if (macp == AES_BLOCK_SIZE || (!macp && blocks > 0)) {
                        u32 rem = ce_aes_mac_update(in, rk, rounds, blocks, mac,
                                                    macp, enc_after);
                        u32 adv = (blocks - rem) * AES_BLOCK_SIZE;

                        macp = enc_after ? 0 : AES_BLOCK_SIZE;
                        in += adv;
                        abytes -= adv;

                        if (unlikely(rem)) {
                                kernel_neon_end();
                                kernel_neon_begin();
                                macp = 0;
                        }
                } else {
                        u32 l = min(AES_BLOCK_SIZE - macp, abytes);

                        crypto_xor(&mac[macp], in, l);
                        in += l;
                        macp += l;
                        abytes -= l;
                }
        } while (abytes > 0);

        return macp;
}

static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
        struct __packed { __be16 l; __be32 h; u16 len; } ltag;
        struct scatter_walk walk;
        u32 len = req->assoclen;
        u32 macp = AES_BLOCK_SIZE;

        /* prepend the AAD with a length tag */
        if (len < 0xff00) {
                ltag.l = cpu_to_be16(len);
                ltag.len = 2;
        } else {
                ltag.l = cpu_to_be16(0xfffe);
                put_unaligned_be32(len, &ltag.h);
                ltag.len = 6;
        }
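        /*
         * This is the RFC 3610 AAD length encoding: e.g. an assoclen of 24
         * becomes the two bytes 0x00 0x18, whereas an assoclen of 0x10000
         * becomes 0xff 0xfe followed by 0x00 0x01 0x00 0x00.
         */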

        macp = ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, macp,
                                    ctx->key_enc, num_rounds(ctx));
        scatterwalk_start(&walk, req->src);

        do {
                u32 n = scatterwalk_clamp(&walk, len);
                u8 *p;

                if (!n) {
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                p = scatterwalk_map(&walk);

                macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc,
                                            num_rounds(ctx));

                len -= n;

                scatterwalk_unmap(p);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, 0, len);
        } while (len);
}

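/*
 * Encrypt path: set up the MAC IV, CBC-MAC the associated data if any, then
 * walk the plaintext while the NEON unit is held, CTR-encrypting and MACing
 * each chunk in a single pass, and finally append the encrypted tag to the
 * destination scatterlist.
 */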
static int ccm_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
        struct skcipher_walk walk;
        u8 __aligned(8) mac[AES_BLOCK_SIZE];
        u8 orig_iv[AES_BLOCK_SIZE];
        u32 len = req->cryptlen;
        int err;

        err = ccm_init_mac(req, mac, len);
        if (err)
                return err;

        /* preserve the original iv for the final round */
        memcpy(orig_iv, req->iv, AES_BLOCK_SIZE);

        err = skcipher_walk_aead_encrypt(&walk, req, false);
        if (unlikely(err))
                return err;

        kernel_neon_begin();

        if (req->assoclen)
                ccm_calculate_auth_mac(req, mac);

        do {
                u32 tail = walk.nbytes % AES_BLOCK_SIZE;
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;
                u8 buf[AES_BLOCK_SIZE];
                u8 *final_iv = NULL;

                if (walk.nbytes == walk.total) {
                        tail = 0;
                        final_iv = orig_iv;
                }

                if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
                        src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
                                           src, walk.nbytes);

                ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
                                   ctx->key_enc, num_rounds(ctx),
                                   mac, walk.iv, final_iv);

                if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
                        memcpy(walk.dst.virt.addr, dst, walk.nbytes);

                if (walk.nbytes) {
                        err = skcipher_walk_done(&walk, tail);
                }
        } while (walk.nbytes);

        kernel_neon_end();

        if (unlikely(err))
                return err;

        /* copy authtag to end of dst */
        scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(aead), 1);

        return 0;
}

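/*
 * Decrypt path: identical walk, except that the CBC-MAC is computed over
 * the decrypted plaintext, and the resulting tag is compared against the
 * one stored in the source scatterlist using crypto_memneq(), which does
 * not leak the position of a mismatch through timing.
 */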
static int ccm_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int authsize = crypto_aead_authsize(aead);
        struct skcipher_walk walk;
        u8 __aligned(8) mac[AES_BLOCK_SIZE];
        u8 orig_iv[AES_BLOCK_SIZE];
        u32 len = req->cryptlen - authsize;
        int err;

        err = ccm_init_mac(req, mac, len);
        if (err)
                return err;

        /* preserve the original iv for the final round */
        memcpy(orig_iv, req->iv, AES_BLOCK_SIZE);

        err = skcipher_walk_aead_decrypt(&walk, req, false);
        if (unlikely(err))
                return err;

        kernel_neon_begin();

        if (req->assoclen)
                ccm_calculate_auth_mac(req, mac);

        do {
                u32 tail = walk.nbytes % AES_BLOCK_SIZE;
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;
                u8 buf[AES_BLOCK_SIZE];
                u8 *final_iv = NULL;

                if (walk.nbytes == walk.total) {
                        tail = 0;
                        final_iv = orig_iv;
                }

                if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
                        src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
                                           src, walk.nbytes);

                ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
                                   ctx->key_enc, num_rounds(ctx),
                                   mac, walk.iv, final_iv);

                if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
                        memcpy(walk.dst.virt.addr, dst, walk.nbytes);

                if (walk.nbytes) {
                        err = skcipher_walk_done(&walk, tail);
                }
        } while (walk.nbytes);

        kernel_neon_end();

        if (unlikely(err))
                return err;

        /* compare calculated auth tag with the stored one */
        scatterwalk_map_and_copy(orig_iv, req->src,
                                 req->assoclen + req->cryptlen - authsize,
                                 authsize, 0);

        if (crypto_memneq(mac, orig_iv, authsize))
                return -EBADMSG;
        return 0;
}

static struct aead_alg ccm_aes_alg = {
        .base = {
                .cra_name               = "ccm(aes)",
                .cra_driver_name        = "ccm-aes-ce",
                .cra_priority           = 300,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct crypto_aes_ctx),
                .cra_module             = THIS_MODULE,
        },
        .ivsize         = AES_BLOCK_SIZE,
        .chunksize      = AES_BLOCK_SIZE,
        .maxauthsize    = AES_BLOCK_SIZE,
        .setkey         = ccm_setkey,
        .setauthsize    = ccm_setauthsize,
        .encrypt        = ccm_encrypt,
        .decrypt        = ccm_decrypt,
};

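/*
 * A user would reach this implementation through the generic AEAD API,
 * along these (illustrative, error handling omitted) lines:
 *
 *      struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *
 *      crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *      crypto_aead_setauthsize(tfm, 16);
 *      aead_request_set_ad(req, assoclen);
 *      aead_request_set_crypt(req, src, dst, len, iv);
 *      crypto_aead_encrypt(req);
 *
 * With a cra_priority of 300, this driver takes precedence over the
 * generic ccm(aes) template whenever the CPU provides the AES
 * instructions.
 */
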
static int __init aes_mod_init(void)
{
        if (!cpu_have_named_feature(AES))
                return -ENODEV;
        return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
        crypto_unregister_aead(&ccm_aes_alg);
}

module_init(aes_mod_init);
module_exit(aes_mod_exit);

MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");