arch/x86/crypto/glue_helper.c

/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
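
/*
 * Each cipher driver describes its implementations in a
 * 'struct common_glue_ctx': a table of functions ordered from the
 * largest batch size (num_blocks) down to the one-block fallback,
 * plus the minimum number of blocks for which enabling the FPU is
 * worthwhile (fpu_blocks_limit).  A minimal sketch with hypothetical
 * function names (the real tables live in the per-cipher glue files,
 * e.g. serpent_avx_glue.c):
 *
 *	static const struct common_glue_ctx example_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(example_ecb_enc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(example_encrypt) }
 *		} }
 *	};
 */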

static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
                                   struct blkcipher_desc *desc,
                                   struct blkcipher_walk *walk)
{
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes, i, func_bytes;
        bool fpu_enabled = false;
        int err;

        err = blkcipher_walk_virt(desc, walk);

        while ((nbytes = walk->nbytes)) {
                u8 *wsrc = walk->src.virt.addr;
                u8 *wdst = walk->dst.virt.addr;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             desc, fpu_enabled, nbytes);

                for (i = 0; i < gctx->num_funcs; i++) {
                        func_bytes = bsize * gctx->funcs[i].num_blocks;

                        /* Process multi-block batch */
                        if (nbytes >= func_bytes) {
                                do {
                                        gctx->funcs[i].fn_u.ecb(ctx, wdst,
                                                                wsrc);

                                        wsrc += func_bytes;
                                        wdst += func_bytes;
                                        nbytes -= func_bytes;
                                } while (nbytes >= func_bytes);

                                if (nbytes < bsize)
                                        goto done;
                        }
                }

done:
                err = blkcipher_walk_done(desc, walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
                          struct blkcipher_desc *desc, struct scatterlist *dst,
                          struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
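
/*
 * CBC encryption is inherently serial (each block is chained to the
 * previous ciphertext block), so it is always done one 128-bit block
 * at a time with the plain one-block function 'fn'; no FPU batching
 * is involved here.
 */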

static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
                                              struct blkcipher_desc *desc,
                                              struct blkcipher_walk *walk)
{
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 *iv = (u128 *)walk->iv;

        do {
                u128_xor(dst, src, iv);
                fn(ctx, (u8 *)dst, (u8 *)dst);
                iv = dst;

                src += 1;
                dst += 1;
                nbytes -= bsize;
        } while (nbytes >= bsize);

        *(u128 *)walk->iv = *iv;
        return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
                            struct blkcipher_desc *desc,
                            struct scatterlist *dst,
                            struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
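
/*
 * CBC decryption, unlike encryption, can be parallelized: the segment
 * is processed from the last block backwards so that a multi-block
 * fn_u.cbc implementation can decrypt a whole batch before the XOR
 * chaining is applied.  The last ciphertext block is saved in
 * 'last_iv' and becomes the IV for the next walk segment.
 */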

static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
                          struct blkcipher_desc *desc,
                          struct blkcipher_walk *walk)
{
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 last_iv;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        /* Start of the last block. */
        src += nbytes / bsize - 1;
        dst += nbytes / bsize - 1;

        last_iv = *src;

        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                /* Process multi-block batch */
                if (nbytes >= func_bytes) {
                        do {
                                nbytes -= func_bytes - bsize;
                                src -= num_blocks - 1;
                                dst -= num_blocks - 1;

                                gctx->funcs[i].fn_u.cbc(ctx, dst, src);

                                nbytes -= bsize;
                                if (nbytes < bsize)
                                        goto done;

                                u128_xor(dst, dst, src - 1);
                                src -= 1;
                                dst -= 1;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        u128_xor(dst, dst, (u128 *)walk->iv);
        *(u128 *)walk->iv = last_iv;

        return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
                            struct blkcipher_desc *desc,
                            struct scatterlist *dst,
                            struct scatterlist *src, unsigned int nbytes)
{
        const unsigned int bsize = 128 / 8;
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             desc, fpu_enabled, nbytes);
                nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
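
/*
 * Handle a final partial block in CTR mode: the remaining bytes are
 * copied into a full-width temporary, encrypted with the one-block
 * CTR function, and copied back, so 'fn_ctr' never sees a short block.
 */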

static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
                                        struct blkcipher_desc *desc,
                                        struct blkcipher_walk *walk)
{
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        u8 *src = (u8 *)walk->src.virt.addr;
        u8 *dst = (u8 *)walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;
        le128 ctrblk;
        u128 tmp;

        be128_to_le128(&ctrblk, (be128 *)walk->iv);

        memcpy(&tmp, src, nbytes);
        fn_ctr(ctx, &tmp, &tmp, &ctrblk);
        memcpy(dst, &tmp, nbytes);

        le128_to_be128((be128 *)walk->iv, &ctrblk);
}
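
/*
 * The counter is kept in little-endian form (le128) while the batched
 * functions run, and is converted back to the big-endian on-wire
 * format in walk->iv once the segment is done.
 */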

static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
                                            struct blkcipher_desc *desc,
                                            struct blkcipher_walk *walk)
{
        const unsigned int bsize = 128 / 8;
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        le128 ctrblk;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        be128_to_le128(&ctrblk, (be128 *)walk->iv);

        /* Process multi-block batch */
        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                if (nbytes >= func_bytes) {
                        do {
                                gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        le128_to_be128((be128 *)walk->iv, &ctrblk);
        return nbytes;
}

int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
                          struct blkcipher_desc *desc, struct scatterlist *dst,
                          struct scatterlist *src, unsigned int nbytes)
{
        const unsigned int bsize = 128 / 8;
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, bsize);

        while ((nbytes = walk.nbytes) >= bsize) {
                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             desc, fpu_enabled, nbytes);
                nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);

        if (walk.nbytes) {
                glue_ctr_crypt_final_128bit(
                        gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }

        return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
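
/*
 * XTS body for the blkcipher interface: walk->iv holds the current
 * tweak 'T'; each fn_u.xts implementation consumes and advances it,
 * so batches of any supported size can be mixed within one segment.
 */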

static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
                                            void *ctx,
                                            struct blkcipher_desc *desc,
                                            struct blkcipher_walk *walk)
{
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        /* Process multi-block batch */
        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                if (nbytes >= func_bytes) {
                        do {
                                gctx->funcs[i].fn_u.xts(ctx, dst, src,
                                                        (le128 *)walk->iv);

                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        return nbytes;
}
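
/*
 * Same XTS body as above, but operating on a skcipher_walk for the
 * skcipher-based callers.
 */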

static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
                                          void *ctx,
                                          struct skcipher_walk *walk)
{
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = walk->src.virt.addr;
        u128 *dst = walk->dst.virt.addr;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        /* Process multi-block batch */
        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                if (nbytes >= func_bytes) {
                        do {
                                gctx->funcs[i].fn_u.xts(ctx, dst, src,
                                                        walk->iv);

                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        return nbytes;
}

/* for implementations providing a faster XTS IV generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
                          struct blkcipher_desc *desc, struct scatterlist *dst,
                          struct scatterlist *src, unsigned int nbytes,
                          void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
                          void *tweak_ctx, void *crypt_ctx)
{
        const unsigned int bsize = 128 / 8;
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);

        err = blkcipher_walk_virt(desc, &walk);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        /* set minimum length to bsize, for tweak_fn */
        fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                     desc, fpu_enabled,
                                     nbytes < bsize ? bsize : nbytes);

        /* calculate first value of T */
        tweak_fn(tweak_ctx, walk.iv, walk.iv);

        while (nbytes) {
                nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

                err = blkcipher_walk_done(desc, &walk, nbytes);
                nbytes = walk.nbytes;
        }

        glue_fpu_end(fpu_enabled);

        return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
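
/*
 * skcipher_request based entry point for XTS; mirrors
 * glue_xts_crypt_128bit above, but uses the skcipher walk API.
 */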

int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
                        struct skcipher_request *req,
                        common_glue_func_t tweak_fn, void *tweak_ctx,
                        void *crypt_ctx)
{
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        /* set minimum length to bsize, for tweak_fn */
        fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                            &walk, fpu_enabled,
                                            nbytes < bsize ? bsize : nbytes);

        /* calculate first value of T */
        tweak_fn(tweak_ctx, walk.iv, walk.iv);

        while (nbytes) {
                nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

                err = skcipher_walk_done(&walk, nbytes);
                nbytes = walk.nbytes;
        }

        glue_fpu_end(fpu_enabled);

        return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
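
/*
 * Process a single XTS block with cipher function 'fn': save the
 * current tweak, advance *iv to the next tweak by multiplying by x
 * in GF(2^128), then perform the usual xor-crypt-xor with the saved
 * tweak (the step comments below use the decryption notation).
 */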

void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
                               common_glue_func_t fn)
{
        le128 ivblk = *iv;

        /* generate next IV */
        le128_gf128mul_x_ble(iv, &ivblk);

        /* CC <- T xor C */
        u128_xor(dst, src, (u128 *)&ivblk);

        /* PP <- D(Key2,CC) */
        fn(ctx, (u8 *)dst, (u8 *)dst);

        /* P <- T xor PP */
        u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");