arch/x86/crypto/glue_helper.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

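/*
 * ECB helper: walk the request and hand each chunk to the widest batch
 * routine that still fits. gctx->funcs[] is expected to be ordered from
 * the largest num_blocks to the smallest, so a many-block SIMD routine
 * covers the bulk of the data and a narrower one handles the remainder.
 */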
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int func_bytes;
		unsigned int i;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ecb(ctx, dst, src);
				src += func_bytes;
				dst += func_bytes;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

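/*
 * CBC encryption is inherently serial (each block depends on the previous
 * ciphertext block), so this helper processes one 128-bit block at a time
 * with the plain single-block cipher function and never enables the FPU.
 */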
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = (u128 *)walk.src.virt.addr;
		u128 *dst = (u128 *)walk.dst.virt.addr;
		u128 *iv = (u128 *)walk.iv;

		do {
			u128_xor(dst, src, iv);
			fn(ctx, (u8 *)dst, (u8 *)dst);
			iv = dst;
			src++;
			dst++;
			nbytes -= bsize;
		} while (nbytes >= bsize);

		*(u128 *)walk.iv = *iv;
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

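/*
 * CBC decryption parallelizes: the blocks of each chunk are handled back
 * to front in multi-block batches, then XORed with the preceding
 * ciphertext block (or walk.iv for the first block). last_iv saves the
 * chunk's final ciphertext block as the chaining value for the next chunk.
 */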
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		u128 last_iv;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		/* Start of the last block. */
		src += nbytes / bsize - 1;
		dst += nbytes / bsize - 1;

		last_iv = *src;

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
							(const u8 *)src);

				nbytes -= func_bytes;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, --src);
				dst--;
			} while (nbytes >= func_bytes);
		}
done:
		u128_xor(dst, dst, (u128 *)walk.iv);
		*(u128 *)walk.iv = last_iv;
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

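/*
 * CTR helper: the counter is kept as an le128 while calling the batch
 * routines and converted back to big-endian in walk.iv between chunks.
 * A trailing partial block is handled by running the last entry in
 * gctx->funcs[] (presumably the single-block routine) on a stack buffer
 * and copying out only the remaining bytes.
 */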
int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) >= bsize) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		le128 ctrblk;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);

		be128_to_le128(&ctrblk, (be128 *)walk.iv);

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
							(const u8 *)src,
							&ctrblk);
				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}

		le128_to_be128((be128 *)walk.iv, &ctrblk);
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (nbytes) {
		le128 ctrblk;
		u128 tmp;

		be128_to_le128(&ctrblk, (be128 *)walk.iv);
		memcpy(&tmp, walk.src.virt.addr, nbytes);
		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
							  (const u8 *)&tmp,
							  &ctrblk);
		memcpy(walk.dst.virt.addr, &tmp, nbytes);
		le128_to_be128((be128 *)walk.iv, &ctrblk);

		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);

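/*
 * Core XTS loop for one skcipher_walk chunk: try the batch routines from
 * widest to narrowest and return the number of bytes left unprocessed.
 */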
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst,
							(const u8 *)src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

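/*
 * XTS helper with ciphertext stealing: when cryptlen is not a multiple of
 * XTS_BLOCK_SIZE, the aligned bulk is processed through a subrequest and
 * the final partial block is completed by stealing from the last full
 * block, using the appropriate tweak order for encryption vs. decryption.
 */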
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx, bool decrypt)
{
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const unsigned int bsize = 128 / 8;
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes, tail;
	int err;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(cts)) {
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

		tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE;

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      crypto_skcipher_get_flags(tfm),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   req->cryptlen - tail, req->iv);
		req = &subreq;
	}

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (err)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     &walk, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	if (unlikely(cts)) {
		u8 *next_tweak, *final_tweak = req->iv;
		struct scatterlist *src, *dst;
		struct scatterlist s[2], d[2];
		le128 b[2];

		dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(d, req->dst, req->cryptlen);

		if (decrypt) {
			next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE);
			gf128mul_x_ble(b, b);
		} else {
			next_tweak = req->iv;
		}

		skcipher_request_set_crypt(&subreq, src, dst, XTS_BLOCK_SIZE,
					   next_tweak);

		err = skcipher_walk_virt(&walk, req, false) ?:
		      skcipher_walk_done(&walk,
				__glue_xts_req_128bit(gctx, crypt_ctx, &walk));
		if (err)
			goto out;

		scatterwalk_map_and_copy(b, dst, 0, XTS_BLOCK_SIZE, 0);
		memcpy(b + 1, b, tail - XTS_BLOCK_SIZE);
		scatterwalk_map_and_copy(b, src, XTS_BLOCK_SIZE,
					 tail - XTS_BLOCK_SIZE, 0);
		scatterwalk_map_and_copy(b, dst, 0, tail, 1);

		skcipher_request_set_crypt(&subreq, dst, dst, XTS_BLOCK_SIZE,
					   final_tweak);

		err = skcipher_walk_virt(&walk, req, false) ?:
		      skcipher_walk_done(&walk,
				__glue_xts_req_128bit(gctx, crypt_ctx, &walk));
	}

out:
	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);

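/*
 * Encrypt or decrypt a single XTS block with tweak *iv using the plain
 * single-block cipher function, and advance *iv by one multiplication by
 * x in GF(2^128) (little-endian block convention).
 */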
void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src,
			       le128 *iv, common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, dst, dst);

	/* P <- T xor PP */
	u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");