// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The AEGIS-128 Authenticated-Encryption Algorithm
 *
 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

#include "aegis.h"

#define AEGIS128_NONCE_SIZE 16
#define AEGIS128_STATE_BLOCKS 5
#define AEGIS128_KEY_SIZE 16
#define AEGIS128_MIN_AUTH_SIZE 8
#define AEGIS128_MAX_AUTH_SIZE 16

struct aegis_state {
	union aegis_block blocks[AEGIS128_STATE_BLOCKS];
};

struct aegis_ctx {
	union aegis_block key;
};

struct aegis128_ops {
	int (*skcipher_walk_init)(struct skcipher_walk *walk,
				  struct aead_request *req, bool atomic);

	void (*crypt_chunk)(struct aegis_state *state, u8 *dst,
			    const u8 *src, unsigned int size);
};

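/*
 * Update the cipher state: per the AEGIS-128 specification, each of the
 * five state blocks is replaced by one AES encryption round of its left
 * neighbour, keyed with the block's own old value:
 * S'[i] = AESRound(S[i - 1], S[i]), with S[-1] taken to be S[4].
 */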
static void crypto_aegis128_update(struct aegis_state *state)
{
	union aegis_block tmp;
	unsigned int i;

	tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1];
	for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--)
		crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1],
				    &state->blocks[i]);
	crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);
}

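/*
 * Absorb one message block into the state. The _a variant expects a
 * properly aligned aegis_block; the _u variant below accepts unaligned
 * data and falls back to byte-wise crypto_xor().
 */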
static void crypto_aegis128_update_a(struct aegis_state *state,
				     const union aegis_block *msg)
{
	crypto_aegis128_update(state);
	crypto_aegis_block_xor(&state->blocks[0], msg);
}

static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg)
{
	crypto_aegis128_update(state);
	crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);
}

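/*
 * Initialize the state from the key and nonce:
 * S[0] = K ^ IV, S[1] = const1, S[2] = const0, S[3] = K ^ const0,
 * S[4] = K ^ const1, followed by ten update rounds that alternately
 * absorb the key and the key XORed with the nonce.
 */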
static void crypto_aegis128_init(struct aegis_state *state,
				 const union aegis_block *key,
				 const u8 *iv)
{
	union aegis_block key_iv;
	unsigned int i;

	key_iv = *key;
	crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE);

	state->blocks[0] = key_iv;
	state->blocks[1] = crypto_aegis_const[1];
	state->blocks[2] = crypto_aegis_const[0];
	state->blocks[3] = *key;
	state->blocks[4] = *key;

	crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]);
	crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]);

	for (i = 0; i < 5; i++) {
		crypto_aegis128_update_a(state, key);
		crypto_aegis128_update_a(state, &key_iv);
	}
}

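/*
 * Absorb full blocks of associated data. Only whole blocks are consumed
 * here; crypto_aegis128_process_ad() below buffers any partial block
 * that straddles scatterlist entries.
 */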
static void crypto_aegis128_ad(struct aegis_state *state,
			       const u8 *src, unsigned int size)
{
	if (AEGIS_ALIGNED(src)) {
		const union aegis_block *src_blk =
				(const union aegis_block *)src;

		while (size >= AEGIS_BLOCK_SIZE) {
			crypto_aegis128_update_a(state, src_blk);

			size -= AEGIS_BLOCK_SIZE;
			src_blk++;
		}
	} else {
		while (size >= AEGIS_BLOCK_SIZE) {
			crypto_aegis128_update_u(state, src);

			size -= AEGIS_BLOCK_SIZE;
			src += AEGIS_BLOCK_SIZE;
		}
	}
}

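/*
 * Encrypt a chunk of plaintext. For each block the keystream is
 * z = S[1] ^ S[4] ^ (S[2] & S[3]); the ciphertext is the plaintext
 * XORed with z, and the plaintext block is absorbed into the state.
 * A trailing partial block is zero-padded before being absorbed.
 */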
static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst,
					  const u8 *src, unsigned int size)
{
	union aegis_block tmp;

	if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
		while (size >= AEGIS_BLOCK_SIZE) {
			union aegis_block *dst_blk =
					(union aegis_block *)dst;
			const union aegis_block *src_blk =
					(const union aegis_block *)src;

			tmp = state->blocks[2];
			crypto_aegis_block_and(&tmp, &state->blocks[3]);
			crypto_aegis_block_xor(&tmp, &state->blocks[4]);
			crypto_aegis_block_xor(&tmp, &state->blocks[1]);
			crypto_aegis_block_xor(&tmp, src_blk);

			crypto_aegis128_update_a(state, src_blk);

			*dst_blk = tmp;

			size -= AEGIS_BLOCK_SIZE;
			src += AEGIS_BLOCK_SIZE;
			dst += AEGIS_BLOCK_SIZE;
		}
	} else {
		while (size >= AEGIS_BLOCK_SIZE) {
			tmp = state->blocks[2];
			crypto_aegis_block_and(&tmp, &state->blocks[3]);
			crypto_aegis_block_xor(&tmp, &state->blocks[4]);
			crypto_aegis_block_xor(&tmp, &state->blocks[1]);
			crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);

			crypto_aegis128_update_u(state, src);

			memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);

			size -= AEGIS_BLOCK_SIZE;
			src += AEGIS_BLOCK_SIZE;
			dst += AEGIS_BLOCK_SIZE;
		}
	}

	if (size > 0) {
		union aegis_block msg = {};
		memcpy(msg.bytes, src, size);

		tmp = state->blocks[2];
		crypto_aegis_block_and(&tmp, &state->blocks[3]);
		crypto_aegis_block_xor(&tmp, &state->blocks[4]);
		crypto_aegis_block_xor(&tmp, &state->blocks[1]);

		crypto_aegis128_update_a(state, &msg);

		crypto_aegis_block_xor(&msg, &tmp);

		memcpy(dst, msg.bytes, size);
	}
}

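/*
 * Decrypt a chunk of ciphertext using the same keystream
 * z = S[1] ^ S[4] ^ (S[2] & S[3]), but absorb the recovered plaintext
 * into the state instead. For a trailing partial block the plaintext
 * is zero-padded before the state update, keeping encryption and
 * decryption of the same message in sync.
 */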
static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst,
					  const u8 *src, unsigned int size)
{
	union aegis_block tmp;

	if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
		while (size >= AEGIS_BLOCK_SIZE) {
			union aegis_block *dst_blk =
					(union aegis_block *)dst;
			const union aegis_block *src_blk =
					(const union aegis_block *)src;

			tmp = state->blocks[2];
			crypto_aegis_block_and(&tmp, &state->blocks[3]);
			crypto_aegis_block_xor(&tmp, &state->blocks[4]);
			crypto_aegis_block_xor(&tmp, &state->blocks[1]);
			crypto_aegis_block_xor(&tmp, src_blk);

			crypto_aegis128_update_a(state, &tmp);

			*dst_blk = tmp;

			size -= AEGIS_BLOCK_SIZE;
			src += AEGIS_BLOCK_SIZE;
			dst += AEGIS_BLOCK_SIZE;
		}
	} else {
		while (size >= AEGIS_BLOCK_SIZE) {
			tmp = state->blocks[2];
			crypto_aegis_block_and(&tmp, &state->blocks[3]);
			crypto_aegis_block_xor(&tmp, &state->blocks[4]);
			crypto_aegis_block_xor(&tmp, &state->blocks[1]);
			crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);

			crypto_aegis128_update_a(state, &tmp);

			memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);

			size -= AEGIS_BLOCK_SIZE;
			src += AEGIS_BLOCK_SIZE;
			dst += AEGIS_BLOCK_SIZE;
		}
	}

	if (size > 0) {
		union aegis_block msg = {};
		memcpy(msg.bytes, src, size);

		tmp = state->blocks[2];
		crypto_aegis_block_and(&tmp, &state->blocks[3]);
		crypto_aegis_block_xor(&tmp, &state->blocks[4]);
		crypto_aegis_block_xor(&tmp, &state->blocks[1]);
		crypto_aegis_block_xor(&msg, &tmp);

		memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size);

		crypto_aegis128_update_a(state, &msg);

		memcpy(dst, msg.bytes, size);
	}
}

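/*
 * Feed the associated data from the request's scatterlist into the
 * state. Data that does not end on a block boundary within one
 * scatterlist entry is collected in 'buf' and absorbed once a full
 * block is available; the final partial block is zero-padded.
 */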
static void crypto_aegis128_process_ad(struct aegis_state *state,
				       struct scatterlist *sg_src,
				       unsigned int assoclen)
{
	struct scatter_walk walk;
	union aegis_block buf;
	unsigned int pos = 0;

	scatterwalk_start(&walk, sg_src);
	while (assoclen != 0) {
		unsigned int size = scatterwalk_clamp(&walk, assoclen);
		unsigned int left = size;
		void *mapped = scatterwalk_map(&walk);
		const u8 *src = (const u8 *)mapped;

		if (pos + size >= AEGIS_BLOCK_SIZE) {
			if (pos > 0) {
				unsigned int fill = AEGIS_BLOCK_SIZE - pos;
				memcpy(buf.bytes + pos, src, fill);
				crypto_aegis128_update_a(state, &buf);
				pos = 0;
				left -= fill;
				src += fill;
			}

			crypto_aegis128_ad(state, src, left);
			src += left & ~(AEGIS_BLOCK_SIZE - 1);
			left &= AEGIS_BLOCK_SIZE - 1;
		}

		memcpy(buf.bytes + pos, src, left);

		pos += left;
		assoclen -= size;
		scatterwalk_unmap(mapped);
		scatterwalk_advance(&walk, size);
		scatterwalk_done(&walk, 0, assoclen);
	}

	if (pos > 0) {
		memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos);
		crypto_aegis128_update_a(state, &buf);
	}
}

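/*
 * Walk the request with the skcipher walk API and en/decrypt each
 * mapped span via ops->crypt_chunk. Every span except the final one
 * is rounded down to the walk stride, so only the last chunk can end
 * in a partial block.
 */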
static void crypto_aegis128_process_crypt(struct aegis_state *state,
					  struct aead_request *req,
					  const struct aegis128_ops *ops)
{
	struct skcipher_walk walk;

	ops->skcipher_walk_init(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
				 nbytes);

		skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}
}

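/*
 * Finalize: absorb the associated-data and message lengths (in bits,
 * as little-endian 64-bit words, XORed with S[3]) over seven update
 * rounds, then XOR all five state blocks into *tag_xor to produce the
 * authentication tag.
 */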
static void crypto_aegis128_final(struct aegis_state *state,
				  union aegis_block *tag_xor,
				  u64 assoclen, u64 cryptlen)
{
	u64 assocbits = assoclen * 8;
	u64 cryptbits = cryptlen * 8;

	union aegis_block tmp;
	unsigned int i;

	tmp.words64[0] = cpu_to_le64(assocbits);
	tmp.words64[1] = cpu_to_le64(cryptbits);

	crypto_aegis_block_xor(&tmp, &state->blocks[3]);

	for (i = 0; i < 7; i++)
		crypto_aegis128_update_a(state, &tmp);

	for (i = 0; i < AEGIS128_STATE_BLOCKS; i++)
		crypto_aegis_block_xor(tag_xor, &state->blocks[i]);
}

static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key,
				  unsigned int keylen)
{
	struct aegis_ctx *ctx = crypto_aead_ctx(aead);

	if (keylen != AEGIS128_KEY_SIZE) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE);
	return 0;
}

static int crypto_aegis128_setauthsize(struct crypto_aead *tfm,
				       unsigned int authsize)
{
	if (authsize > AEGIS128_MAX_AUTH_SIZE)
		return -EINVAL;
	if (authsize < AEGIS128_MIN_AUTH_SIZE)
		return -EINVAL;
	return 0;
}

static void crypto_aegis128_crypt(struct aead_request *req,
				  union aegis_block *tag_xor,
				  unsigned int cryptlen,
				  const struct aegis128_ops *ops)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
	struct aegis_state state;

	crypto_aegis128_init(&state, &ctx->key, req->iv);
	crypto_aegis128_process_ad(&state, req->src, req->assoclen);
	crypto_aegis128_process_crypt(&state, req, ops);
	crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen);
}

static int crypto_aegis128_encrypt(struct aead_request *req)
{
	static const struct aegis128_ops ops = {
		.skcipher_walk_init = skcipher_walk_aead_encrypt,
		.crypt_chunk = crypto_aegis128_encrypt_chunk,
	};

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	union aegis_block tag = {};
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen;

	crypto_aegis128_crypt(req, &tag, cryptlen, &ops);

	scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
				 authsize, 1);
	return 0;
}

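/*
 * Decrypt and verify. The expected tag is copied from the request into
 * 'tag' first, and crypto_aegis128_final() XORs the computed tag into
 * it, so the result is all-zero exactly when the tags match. The
 * comparison against zeros uses crypto_memneq() to stay constant-time.
 */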
static int crypto_aegis128_decrypt(struct aead_request *req)
{
	static const struct aegis128_ops ops = {
		.skcipher_walk_init = skcipher_walk_aead_decrypt,
		.crypt_chunk = crypto_aegis128_decrypt_chunk,
	};

	static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {};

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	union aegis_block tag;
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen - authsize;

	scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
				 authsize, 0);

	crypto_aegis128_crypt(req, &tag, cryptlen, &ops);

	return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}

static int crypto_aegis128_init_tfm(struct crypto_aead *tfm)
{
	return 0;
}

static void crypto_aegis128_exit_tfm(struct crypto_aead *tfm)
{
}

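/*
 * A minimal usage sketch (not part of this file): a kernel client
 * reaches this implementation through the generic AEAD API. Error
 * handling is omitted and 'key', 'iv', 'assoclen', 'cryptlen',
 * 'sg_src' and 'sg_dst' are placeholders the caller must set up:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("aegis128", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, AEGIS128_KEY_SIZE);
 *	crypto_aead_setauthsize(tfm, AEGIS128_MAX_AUTH_SIZE);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, NULL, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, sg_src, sg_dst, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */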
static struct aead_alg crypto_aegis128_alg = {
	.setkey = crypto_aegis128_setkey,
	.setauthsize = crypto_aegis128_setauthsize,
	.encrypt = crypto_aegis128_encrypt,
	.decrypt = crypto_aegis128_decrypt,
	.init = crypto_aegis128_init_tfm,
	.exit = crypto_aegis128_exit_tfm,

	.ivsize = AEGIS128_NONCE_SIZE,
	.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
	.chunksize = AEGIS_BLOCK_SIZE,

	.base = {
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct aegis_ctx),
		.cra_alignmask = 0,

		.cra_priority = 100,

		.cra_name = "aegis128",
		.cra_driver_name = "aegis128-generic",

		.cra_module = THIS_MODULE,
	}
};

static int __init crypto_aegis128_module_init(void)
{
	return crypto_register_aead(&crypto_aegis128_alg);
}

static void __exit crypto_aegis128_module_exit(void)
{
	crypto_unregister_aead(&crypto_aegis128_alg);
}

subsys_initcall(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm");
MODULE_ALIAS_CRYPTO("aegis128");
MODULE_ALIAS_CRYPTO("aegis128-generic");