/* arch/x86/crypto/blowfish_glue.c - Linux 4.18.10 */

/*
 * Glue Code for assembler optimized version of Blowfish
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */

#include <crypto/algapi.h>
#include <crypto/blowfish.h>
#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>

/* regular block cipher functions */
asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src,
                                   bool xor);
asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src);

/* 4-way parallel cipher functions */
asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
                                        const u8 *src, bool xor);
asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst,
                                      const u8 *src);

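/*
 * Thin wrappers around the assembler entry points: the plain variants
 * overwrite dst with the cipher output, while the _xor variants XOR the
 * output into dst (used by the CTR path below to apply the keystream).
 */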
static inline void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src)
{
        __blowfish_enc_blk(ctx, dst, src, false);
}

static inline void blowfish_enc_blk_xor(struct bf_ctx *ctx, u8 *dst,
                                        const u8 *src)
{
        __blowfish_enc_blk(ctx, dst, src, true);
}

static inline void blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
                                         const u8 *src)
{
        __blowfish_enc_blk_4way(ctx, dst, src, false);
}

static inline void blowfish_enc_blk_xor_4way(struct bf_ctx *ctx, u8 *dst,
                                             const u8 *src)
{
        __blowfish_enc_blk_4way(ctx, dst, src, true);
}

static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        blowfish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}

static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}

static int blowfish_setkey_skcipher(struct crypto_skcipher *tfm,
                                    const u8 *key, unsigned int keylen)
{
        return blowfish_setkey(&tfm->base, key, keylen);
}

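/*
 * ECB helper: walk the request and encrypt/decrypt four blocks per call to
 * the 4-way assembler routine, then fall back to single-block calls for any
 * remaining blocks.
 */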
static int ecb_crypt(struct skcipher_request *req,
                     void (*fn)(struct bf_ctx *, u8 *, const u8 *),
                     void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
{
        unsigned int bsize = BF_BLOCK_SIZE;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                u8 *wsrc = walk.src.virt.addr;
                u8 *wdst = walk.dst.virt.addr;

                /* Process four block batch */
                if (nbytes >= bsize * 4) {
                        do {
                                fn_4way(ctx, wdst, wsrc);

                                wsrc += bsize * 4;
                                wdst += bsize * 4;
                                nbytes -= bsize * 4;
                        } while (nbytes >= bsize * 4);

                        if (nbytes < bsize)
                                goto done;
                }

                /* Handle leftovers */
                do {
                        fn(ctx, wdst, wsrc);

                        wsrc += bsize;
                        wdst += bsize;
                        nbytes -= bsize;
                } while (nbytes >= bsize);

done:
                err = skcipher_walk_done(&walk, nbytes);
        }

        return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
        return ecb_crypt(req, blowfish_enc_blk, blowfish_enc_blk_4way);
}

static int ecb_decrypt(struct skcipher_request *req)
{
        return ecb_crypt(req, blowfish_dec_blk, blowfish_dec_blk_4way);
}

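/*
 * CBC encryption is inherently serial (each block is chained to the previous
 * ciphertext block), so only the single-block assembler routine is used here.
 */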
static unsigned int __cbc_encrypt(struct bf_ctx *ctx,
                                  struct skcipher_walk *walk)
{
        unsigned int bsize = BF_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u64 *src = (u64 *)walk->src.virt.addr;
        u64 *dst = (u64 *)walk->dst.virt.addr;
        u64 *iv = (u64 *)walk->iv;

        do {
                *dst = *src ^ *iv;
                blowfish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
                iv = dst;

                src += 1;
                dst += 1;
                nbytes -= bsize;
        } while (nbytes >= bsize);

        *(u64 *)walk->iv = *iv;
        return nbytes;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                nbytes = __cbc_encrypt(ctx, &walk);
                err = skcipher_walk_done(&walk, nbytes);
        }

        return err;
}

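/*
 * CBC decryption can be parallelized. Blocks are processed from the end of
 * the walk towards the beginning so that, for in-place requests, ciphertext
 * blocks still needed as chaining values have not been overwritten yet; the
 * saved last ciphertext block becomes the next IV.
 */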
static unsigned int __cbc_decrypt(struct bf_ctx *ctx,
                                  struct skcipher_walk *walk)
{
        unsigned int bsize = BF_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u64 *src = (u64 *)walk->src.virt.addr;
        u64 *dst = (u64 *)walk->dst.virt.addr;
        u64 ivs[4 - 1];
        u64 last_iv;

        /* Start of the last block. */
        src += nbytes / bsize - 1;
        dst += nbytes / bsize - 1;

        last_iv = *src;

        /* Process four block batch */
        if (nbytes >= bsize * 4) {
                do {
                        nbytes -= bsize * 4 - bsize;
                        src -= 4 - 1;
                        dst -= 4 - 1;

                        ivs[0] = src[0];
                        ivs[1] = src[1];
                        ivs[2] = src[2];

                        blowfish_dec_blk_4way(ctx, (u8 *)dst, (u8 *)src);

                        dst[1] ^= ivs[0];
                        dst[2] ^= ivs[1];
                        dst[3] ^= ivs[2];

                        nbytes -= bsize;
                        if (nbytes < bsize)
                                goto done;

                        *dst ^= *(src - 1);
                        src -= 1;
                        dst -= 1;
                } while (nbytes >= bsize * 4);
        }

        /* Handle leftovers */
        for (;;) {
                blowfish_dec_blk(ctx, (u8 *)dst, (u8 *)src);

                nbytes -= bsize;
                if (nbytes < bsize)
                        break;

                *dst ^= *(src - 1);
                src -= 1;
                dst -= 1;
        }

done:
        *dst ^= *(u64 *)walk->iv;
        *(u64 *)walk->iv = last_iv;

        return nbytes;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                nbytes = __cbc_decrypt(ctx, &walk);
                err = skcipher_walk_done(&walk, nbytes);
        }

        return err;
}

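/*
 * CTR tail handling: encrypt the counter block into a keystream buffer and
 * XOR only the remaining (sub-block-size) bytes into the output, then bump
 * the counter.
 */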
static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[BF_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        blowfish_enc_blk(ctx, keystream, ctrblk);
        crypto_xor_cpy(dst, keystream, src, nbytes);

        crypto_inc(ctrblk, BF_BLOCK_SIZE);
}

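/*
 * Main CTR loop. The IV is treated as a 64-bit big-endian counter spanning
 * the whole Blowfish block; ciphertext is produced by XOR-ing the encrypted
 * counter blocks into the (copied) plaintext via the _xor helpers.
 */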
static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk)
{
        unsigned int bsize = BF_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u64 *src = (u64 *)walk->src.virt.addr;
        u64 *dst = (u64 *)walk->dst.virt.addr;
        u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
        __be64 ctrblocks[4];

        /* Process four block batch */
        if (nbytes >= bsize * 4) {
                do {
                        if (dst != src) {
                                dst[0] = src[0];
                                dst[1] = src[1];
                                dst[2] = src[2];
                                dst[3] = src[3];
                        }

                        /* create ctrblks for parallel encrypt */
                        ctrblocks[0] = cpu_to_be64(ctrblk++);
                        ctrblocks[1] = cpu_to_be64(ctrblk++);
                        ctrblocks[2] = cpu_to_be64(ctrblk++);
                        ctrblocks[3] = cpu_to_be64(ctrblk++);

                        blowfish_enc_blk_xor_4way(ctx, (u8 *)dst,
                                                  (u8 *)ctrblocks);

                        src += 4;
                        dst += 4;
                } while ((nbytes -= bsize * 4) >= bsize * 4);

                if (nbytes < bsize)
                        goto done;
        }

        /* Handle leftovers */
        do {
                if (dst != src)
                        *dst = *src;

                ctrblocks[0] = cpu_to_be64(ctrblk++);

                blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks);

                src += 1;
                dst += 1;
        } while ((nbytes -= bsize) >= bsize);

done:
        *(__be64 *)walk->iv = cpu_to_be64(ctrblk);
        return nbytes;
}

static int ctr_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
                nbytes = __ctr_crypt(ctx, &walk);
                err = skcipher_walk_done(&walk, nbytes);
        }

        if (nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}

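/*
 * Algorithm registrations. The cra_priority values are higher than those of
 * the generic C Blowfish implementation, so these assembler versions are
 * preferred when both are available.
 */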
static struct crypto_alg bf_cipher_alg = {
        .cra_name               = "blowfish",
        .cra_driver_name        = "blowfish-asm",
        .cra_priority           = 200,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = BF_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct bf_ctx),
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize        = BF_MIN_KEY_SIZE,
                        .cia_max_keysize        = BF_MAX_KEY_SIZE,
                        .cia_setkey             = blowfish_setkey,
                        .cia_encrypt            = blowfish_encrypt,
                        .cia_decrypt            = blowfish_decrypt,
                }
        }
};

static struct skcipher_alg bf_skcipher_algs[] = {
        {
                .base.cra_name          = "ecb(blowfish)",
                .base.cra_driver_name   = "ecb-blowfish-asm",
                .base.cra_priority      = 300,
                .base.cra_blocksize     = BF_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct bf_ctx),
                .base.cra_module        = THIS_MODULE,
                .min_keysize            = BF_MIN_KEY_SIZE,
                .max_keysize            = BF_MAX_KEY_SIZE,
                .setkey                 = blowfish_setkey_skcipher,
                .encrypt                = ecb_encrypt,
                .decrypt                = ecb_decrypt,
        }, {
                .base.cra_name          = "cbc(blowfish)",
                .base.cra_driver_name   = "cbc-blowfish-asm",
                .base.cra_priority      = 300,
                .base.cra_blocksize     = BF_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct bf_ctx),
                .base.cra_module        = THIS_MODULE,
                .min_keysize            = BF_MIN_KEY_SIZE,
                .max_keysize            = BF_MAX_KEY_SIZE,
                .ivsize                 = BF_BLOCK_SIZE,
                .setkey                 = blowfish_setkey_skcipher,
                .encrypt                = cbc_encrypt,
                .decrypt                = cbc_decrypt,
        }, {
                .base.cra_name          = "ctr(blowfish)",
                .base.cra_driver_name   = "ctr-blowfish-asm",
                .base.cra_priority      = 300,
                .base.cra_blocksize     = 1,
                .base.cra_ctxsize       = sizeof(struct bf_ctx),
                .base.cra_module        = THIS_MODULE,
                .min_keysize            = BF_MIN_KEY_SIZE,
                .max_keysize            = BF_MAX_KEY_SIZE,
                .ivsize                 = BF_BLOCK_SIZE,
                .chunksize              = BF_BLOCK_SIZE,
                .setkey                 = blowfish_setkey_skcipher,
                .encrypt                = ctr_crypt,
                .decrypt                = ctr_crypt,
        },
};

static bool is_blacklisted_cpu(void)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return false;

        if (boot_cpu_data.x86 == 0x0f) {
                /*
                 * On Pentium 4, blowfish-x86_64 is slower than the generic C
                 * implementation because it uses 64-bit rotates, which are
                 * really slow on P4. Therefore blacklist P4s.
                 */
                return true;
        }

        return false;
}

static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");

static int __init init(void)
{
        int err;

        if (!force && is_blacklisted_cpu()) {
                printk(KERN_INFO
                        "blowfish-x86_64: performance on this CPU "
                        "would be suboptimal: disabling "
                        "blowfish-x86_64.\n");
                return -ENODEV;
        }

        err = crypto_register_alg(&bf_cipher_alg);
        if (err)
                return err;

        err = crypto_register_skciphers(bf_skcipher_algs,
                                        ARRAY_SIZE(bf_skcipher_algs));
        if (err)
                crypto_unregister_alg(&bf_cipher_alg);

        return err;
}

static void __exit fini(void)
{
        crypto_unregister_alg(&bf_cipher_alg);
        crypto_unregister_skciphers(bf_skcipher_algs,
                                    ARRAY_SIZE(bf_skcipher_algs));
}

module_init(init);
module_exit(fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("blowfish");
MODULE_ALIAS_CRYPTO("blowfish-asm");