/*
 * The MORUS-640 Authenticated-Encryption Algorithm
 *   Common x86 SIMD glue skeleton
 *
 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus640_glue.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/fpu/api.h>
struct morus640_state {
	struct morus640_block s[MORUS_STATE_BLOCKS];
};
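/*
 * MORUS-640 keeps its working state in five 128-bit words: each
 * morus640_block is MORUS640_BLOCK_SIZE (16) bytes and MORUS_STATE_BLOCKS
 * is 5, for 80 bytes of state in total.  The block and state definitions
 * themselves come from <crypto/morus640_glue.h> and the shared MORUS
 * headers.
 */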
struct morus640_ops {
	int (*skcipher_walk_init)(struct skcipher_walk *walk,
				  struct aead_request *req, bool atomic);

	void (*crypt_blocks)(void *state, const void *src, void *dst,
			     unsigned int length);
	void (*crypt_tail)(void *state, const void *src, void *dst,
			   unsigned int length);
};
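/*
 * The per-ISA backend supplies its SIMD primitives through a
 * struct morus640_glue_ops, installed via crypto_morus640_glue_init_ops()
 * below.  The authoritative definition lives in <crypto/morus640_glue.h>;
 * judging only by how the ops are invoked in this file, it looks roughly
 * like the following sketch (field order and exact integer widths are
 * assumptions):
 *
 *	struct morus640_glue_ops {
 *		void (*init)(void *state, const void *key, const void *iv);
 *		void (*ad)(void *state, const void *data, unsigned int length);
 *		void (*enc)(void *state, const void *src, void *dst,
 *			    unsigned int length);
 *		void (*enc_tail)(void *state, const void *src, void *dst,
 *				 unsigned int length);
 *		void (*dec)(void *state, const void *src, void *dst,
 *			    unsigned int length);
 *		void (*dec_tail)(void *state, const void *src, void *dst,
 *				 unsigned int length);
 *		void (*final)(void *state, void *tag_xor,
 *			      u64 assoclen, u64 cryptlen);
 *	};
 */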
static void crypto_morus640_glue_process_ad(
		struct morus640_state *state,
		const struct morus640_glue_ops *ops,
		struct scatterlist *sg_src, unsigned int assoclen)
{
	struct scatter_walk walk;
	struct morus640_block buf;
	unsigned int pos = 0;

	scatterwalk_start(&walk, sg_src);
	while (assoclen != 0) {
		unsigned int size = scatterwalk_clamp(&walk, assoclen);
		unsigned int left = size;
		void *mapped = scatterwalk_map(&walk);
		const u8 *src = (const u8 *)mapped;

		if (pos + size >= MORUS640_BLOCK_SIZE) {
			if (pos > 0) {
				unsigned int fill = MORUS640_BLOCK_SIZE - pos;

				memcpy(buf.bytes + pos, src, fill);
				ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE);
				pos = 0;
				left -= fill;
				src += fill;
			}

			ops->ad(state, src, left);
			src += left & ~(MORUS640_BLOCK_SIZE - 1);
			left &= MORUS640_BLOCK_SIZE - 1;
		}

		memcpy(buf.bytes + pos, src, left);

		pos += left;
		assoclen -= size;
		scatterwalk_unmap(mapped);
		scatterwalk_advance(&walk, size);
		scatterwalk_done(&walk, 0, assoclen);
	}

	if (pos > 0) {
		memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos);
		ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE);
	}
}
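/*
 * Worked example of the associated-data buffering above: with two
 * scatterlist entries of 10 and 22 bytes, the first pass only buffers the
 * 10 bytes (pos = 10).  The second pass sees pos + size = 32 >= 16, so it
 * tops the buffer up with 6 bytes and feeds one full block to ops->ad(),
 * then passes the remaining 16 bytes straight from the mapped page as a
 * second full block; nothing is left over, so the final zero-padding
 * branch does not run.
 */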
static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
					       struct morus640_ops ops,
					       struct aead_request *req)
{
	struct skcipher_walk walk;
	u8 *cursor_src, *cursor_dst;
	unsigned int chunksize, base;

	ops.skcipher_walk_init(&walk, req, false);

	while (walk.nbytes) {
		cursor_src = walk.src.virt.addr;
		cursor_dst = walk.dst.virt.addr;
		chunksize = walk.nbytes;

		ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);

		base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
		cursor_src += base;
		cursor_dst += base;
		chunksize &= MORUS640_BLOCK_SIZE - 1;

		if (chunksize > 0)
			ops.crypt_tail(state, cursor_src, cursor_dst,
				       chunksize);

		skcipher_walk_done(&walk, 0);
	}
}
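/*
 * Example of the split above: with walk.nbytes == 37, crypt_blocks() is
 * expected to consume only the 32 bytes that form complete 16-byte blocks;
 * the cursors are then advanced by base == 32 and crypt_tail() handles the
 * trailing 5 bytes via the backend's partial-block path.
 */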
int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
				unsigned int keylen)
{
	struct morus640_ctx *ctx = crypto_aead_ctx(aead);

	if (keylen != MORUS640_BLOCK_SIZE) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key.bytes, key, MORUS640_BLOCK_SIZE);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_setkey);
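/*
 * crypto_morus640_glue_setauthsize() below accepts any tag length up to
 * MORUS_MAX_AUTH_SIZE (16 bytes, i.e. the full 128-bit MORUS tag);
 * truncated tags simply compare fewer bytes at decryption time.
 */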
int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_setauthsize);
static void crypto_morus640_glue_crypt(struct aead_request *req,
				       struct morus640_ops ops,
				       unsigned int cryptlen,
				       struct morus640_block *tag_xor)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus640_state state;

	kernel_fpu_begin();

	ctx->ops->init(&state, &ctx->key, req->iv);
	crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
	crypto_morus640_glue_process_crypt(&state, ops, req);
	ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);

	kernel_fpu_end();
}
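/*
 * The whole pass above runs inside one kernel_fpu_begin()/kernel_fpu_end()
 * section, since the backend primitives work on SSE/AVX registers that may
 * only be touched while the kernel owns the FPU state.  Contexts where the
 * FPU is not usable go through the cryptd wrappers further down instead.
 */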
int crypto_morus640_glue_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus640_ops OPS = {
		.skcipher_walk_init = skcipher_walk_aead_encrypt,
		.crypt_blocks = ctx->ops->enc,
		.crypt_tail = ctx->ops->enc_tail,
	};

	struct morus640_block tag = {};
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen;

	crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag);

	scatterwalk_map_and_copy(tag.bytes, req->dst,
				 req->assoclen + cryptlen, authsize, 1);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_encrypt);
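/*
 * Usage sketch (illustrative only, not part of the original glue code):
 * how a kernel caller might drive an AEAD built on this glue through the
 * generic crypto API.  morus640_encrypt_example() is a hypothetical helper;
 * the "morus640" algorithm name, the 8-byte-AD/32-byte-plaintext layout and
 * the in-place tag placement are assumptions made for the example.  @buf is
 * assumed to be a kmalloc()ed buffer of 8 + 32 + 16 bytes holding the
 * associated data followed by the plaintext, with room for the tag; @iv
 * points to a 16-byte nonce.
 */
static int __maybe_unused morus640_encrypt_example(u8 *buf, u8 *iv)
{
	static const u8 key[MORUS640_BLOCK_SIZE] = {};	/* demo key only */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("morus640", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	err = crypto_aead_setauthsize(tfm, MORUS_MAX_AUTH_SIZE);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* One scatterlist entry covering AD || plaintext || tag space. */
	sg_init_one(&sg, buf, 8 + 32 + MORUS_MAX_AUTH_SIZE);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 8);
	aead_request_set_crypt(req, &sg, &sg, 32, iv);

	/* On success the ciphertext and tag replace buf[8..55] in place. */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}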
int crypto_morus640_glue_decrypt(struct aead_request *req)
{
	static const u8 zeros[MORUS640_BLOCK_SIZE] = {};

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus640_ops OPS = {
		.skcipher_walk_init = skcipher_walk_aead_decrypt,
		.crypt_blocks = ctx->ops->dec,
		.crypt_tail = ctx->ops->dec_tail,
	};

	struct morus640_block tag;
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen - authsize;

	scatterwalk_map_and_copy(tag.bytes, req->src,
				 req->assoclen + cryptlen, authsize, 0);

	crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag);

	return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_decrypt);
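/*
 * Tag check note: the backend's final() XORs the computed tag into the tag
 * that was copied out of the request, so a valid message leaves all-zero
 * bytes behind; crypto_memneq() then compares against zeros in constant
 * time to avoid leaking how many tag bytes matched.
 */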
void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
				   const struct morus640_glue_ops *ops)
{
	struct morus640_ctx *ctx = crypto_aead_ctx(aead);

	ctx->ops = ops;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);
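/*
 * A per-ISA backend (e.g. an SSE2 glue module) is expected to wire itself
 * up roughly like this from its own init_tfm callback; the asm entry
 * points below are illustrative placeholder names, not real symbols:
 *
 *	static const struct morus640_glue_ops crypto_morus640_xxx_ops = {
 *		.init = crypto_morus640_xxx_init,
 *		.ad = crypto_morus640_xxx_ad,
 *		.enc = crypto_morus640_xxx_enc,
 *		.enc_tail = crypto_morus640_xxx_enc_tail,
 *		.dec = crypto_morus640_xxx_dec,
 *		.dec_tail = crypto_morus640_xxx_dec_tail,
 *		.final = crypto_morus640_xxx_final,
 *	};
 *
 *	static int crypto_morus640_xxx_init_tfm(struct crypto_aead *aead)
 *	{
 *		crypto_morus640_glue_init_ops(aead, &crypto_morus640_xxx_ops);
 *		return 0;
 *	}
 */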
int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
				unsigned int keylen)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey);
int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
				     unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize);
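/*
 * The two request entry points below either call the wrapped internal
 * ("__"-prefixed) algorithm directly or push the request through cryptd.
 * The direct path is taken only when irq_fpu_usable() says the SIMD
 * registers may be used in the current context and, in addition, we are
 * either not in atomic context or the cryptd queue is already empty;
 * otherwise the request stays on cryptd_tfm->base and is handled
 * asynchronously.
 */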
int cryptd_morus640_glue_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		aead = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, aead);

	return crypto_aead_encrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt);
int cryptd_morus640_glue_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		aead = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, aead);

	return crypto_aead_decrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt);
int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
	char internal_name[CRYPTO_MAX_ALG_NAME];

	if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
			>= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm);
void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");