arch/x86/crypto/morus1280_glue.c
/*
 * The MORUS-1280 Authenticated-Encryption Algorithm
 * Common x86 SIMD glue skeleton
 *
 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus1280_glue.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/fpu/api.h>
struct morus1280_state {
	struct morus1280_block s[MORUS_STATE_BLOCKS];
};
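
/*
 * Per-direction callbacks: the skcipher walk initializer plus the bulk and
 * tail crypt routines supplied by the SIMD implementation.
 */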
struct morus1280_ops {
	int (*skcipher_walk_init)(struct skcipher_walk *walk,
				  struct aead_request *req, bool atomic);

	void (*crypt_blocks)(void *state, const void *src, void *dst,
			     unsigned int length);
	void (*crypt_tail)(void *state, const void *src, void *dst,
			   unsigned int length);
};
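
/*
 * Feed the associated data into the state one full block at a time. Partial
 * blocks are buffered in 'buf' so that data split across scatterlist entries
 * is still absorbed in MORUS1280_BLOCK_SIZE chunks; a trailing partial block
 * is zero-padded before the final ops->ad() call.
 */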
static void crypto_morus1280_glue_process_ad(
		struct morus1280_state *state,
		const struct morus1280_glue_ops *ops,
		struct scatterlist *sg_src, unsigned int assoclen)
{
	struct scatter_walk walk;
	struct morus1280_block buf;
	unsigned int pos = 0;

	scatterwalk_start(&walk, sg_src);
	while (assoclen != 0) {
		unsigned int size = scatterwalk_clamp(&walk, assoclen);
		unsigned int left = size;
		void *mapped = scatterwalk_map(&walk);
		const u8 *src = (const u8 *)mapped;

		if (pos + size >= MORUS1280_BLOCK_SIZE) {
			if (pos > 0) {
				unsigned int fill = MORUS1280_BLOCK_SIZE - pos;

				memcpy(buf.bytes + pos, src, fill);
				ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
				pos = 0;
				left -= fill;
				src += fill;
			}

			ops->ad(state, src, left);
			src += left & ~(MORUS1280_BLOCK_SIZE - 1);
			left &= MORUS1280_BLOCK_SIZE - 1;
		}

		memcpy(buf.bytes + pos, src, left);

		pos += left;
		assoclen -= size;
		scatterwalk_unmap(mapped);
		scatterwalk_advance(&walk, size);
		scatterwalk_done(&walk, 0, assoclen);
	}

	if (pos > 0) {
		memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos);
		ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
	}
}
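
/*
 * Encrypt or decrypt the request payload chunk by chunk. The bulk routine is
 * handed the whole chunk length; the SIMD implementations are expected to
 * process only the rounded-down number of whole blocks, with crypt_tail
 * covering the remaining partial block.
 */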
static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
						struct morus1280_ops ops,
						struct aead_request *req)
{
	struct skcipher_walk walk;
	u8 *cursor_src, *cursor_dst;
	unsigned int chunksize, base;

	ops.skcipher_walk_init(&walk, req, false);

	while (walk.nbytes) {
		cursor_src = walk.src.virt.addr;
		cursor_dst = walk.dst.virt.addr;
		chunksize = walk.nbytes;

		ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);

		base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
		cursor_src += base;
		cursor_dst += base;
		chunksize &= MORUS1280_BLOCK_SIZE - 1;

		if (chunksize > 0)
			ops.crypt_tail(state, cursor_src, cursor_dst,
				       chunksize);

		skcipher_walk_done(&walk, 0);
	}
}
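
/*
 * MORUS-1280 takes either a 256-bit or a 128-bit key; a short key is simply
 * duplicated to fill the 256-bit key block.
 */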
int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int keylen)
{
	struct morus1280_ctx *ctx = crypto_aead_ctx(aead);

	if (keylen == MORUS1280_BLOCK_SIZE) {
		memcpy(ctx->key.bytes, key, MORUS1280_BLOCK_SIZE);
	} else if (keylen == MORUS1280_BLOCK_SIZE / 2) {
		memcpy(ctx->key.bytes, key, keylen);
		memcpy(ctx->key.bytes + keylen, key, keylen);
	} else {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setkey);
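
/* The authentication tag may be truncated, but never extended. */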
int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setauthsize);
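
/*
 * Run the whole init/ad/crypt/final sequence inside a single
 * kernel_fpu_begin()/kernel_fpu_end() section, so the FPU state is saved
 * and restored only once per request.
 */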
static void crypto_morus1280_glue_crypt(struct aead_request *req,
					struct morus1280_ops ops,
					unsigned int cryptlen,
					struct morus1280_block *tag_xor)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus1280_state state;

	kernel_fpu_begin();

	ctx->ops->init(&state, &ctx->key, req->iv);
	crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
	crypto_morus1280_glue_process_crypt(&state, ops, req);
	ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);

	kernel_fpu_end();
}
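
/*
 * Encrypt: the tag buffer starts out zeroed, so final()'s XOR leaves the raw
 * tag, which is then copied after the ciphertext in the destination.
 */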
int crypto_morus1280_glue_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus1280_ops ops = {
		.skcipher_walk_init = skcipher_walk_aead_encrypt,
		.crypt_blocks = ctx->ops->enc,
		.crypt_tail = ctx->ops->enc_tail,
	};

	struct morus1280_block tag = {};
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen;

	crypto_morus1280_glue_crypt(req, ops, cryptlen, &tag);

	scatterwalk_map_and_copy(tag.bytes, req->dst,
				 req->assoclen + cryptlen, authsize, 1);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_encrypt);
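
/*
 * Decrypt: the expected tag is read from the end of the source and passed
 * as tag_xor; final() XORs the computed tag over it, so a matching tag
 * yields all-zero bytes, which crypto_memneq() checks in constant time.
 */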
int crypto_morus1280_glue_decrypt(struct aead_request *req)
{
	static const u8 zeros[MORUS1280_BLOCK_SIZE] = {};

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus1280_ops ops = {
		.skcipher_walk_init = skcipher_walk_aead_decrypt,
		.crypt_blocks = ctx->ops->dec,
		.crypt_tail = ctx->ops->dec_tail,
	};

	struct morus1280_block tag;
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen - authsize;

	scatterwalk_map_and_copy(tag.bytes, req->src,
				 req->assoclen + cryptlen, authsize, 0);

	crypto_morus1280_glue_crypt(req, ops, cryptlen, &tag);

	return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_decrypt);
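
/* Record the SIMD implementation's callbacks in the transform context. */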
void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
				    const struct morus1280_glue_ops *ops)
{
	struct morus1280_ctx *ctx = crypto_aead_ctx(aead);

	ctx->ops = ops;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);
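
/*
 * The cryptd wrappers below provide the async variant of the algorithm:
 * setkey and setauthsize are simply forwarded to the wrapped cryptd tfm.
 */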
int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int keylen)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey);
int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
				      unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize);
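
/*
 * If the FPU is usable here, and we are either not in atomic context or the
 * cryptd queue is empty (so no queued requests get reordered), call the
 * internal SIMD algorithm directly; otherwise dispatch through cryptd, which
 * runs the request in process context.
 */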
int cryptd_morus1280_glue_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		aead = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, aead);

	return crypto_aead_encrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt);
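
/* Same FPU-availability dispatch as the encrypt path above. */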
int cryptd_morus1280_glue_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		aead = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, aead);

	return crypto_aead_decrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt);
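
/*
 * Bind to the internal "__<driver-name>" algorithm via cryptd and adopt its
 * request size so requests can be handed to the child transform directly.
 */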
int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
	char internal_name[CRYPTO_MAX_ALG_NAME];

	if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
			>= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm);
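
/* Release the wrapped cryptd transform. */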
void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");