/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/serpent.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

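/*
 * Context for the async "serpent-sse2" wrappers: it only holds the cryptd
 * transform that requests are deferred to when the FPU cannot be used in
 * the caller's context (see ablk_encrypt()/ablk_decrypt() below).
 */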
struct async_serpent_ctx {
        struct cryptd_ablkcipher *cryptd_tfm;
};

static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
        if (fpu_enabled)
                return true;

        /* SSE2 is only used when chunk to be processed is large enough, so
         * do not enable FPU until it is necessary.
         */
        if (nbytes < SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS)
                return false;

        kernel_fpu_begin();
        return true;
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
        if (fpu_enabled)
                kernel_fpu_end();
}

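/*
 * ecb_crypt() walks the scatterlists and, with the FPU enabled for large
 * chunks, first processes full batches of SERPENT_PARALLEL_BLOCKS blocks
 * with the xway SSE2 routines, then falls back to the generic one-block
 * functions for the leftovers.
 */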
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
                     bool enc)
{
        bool fpu_enabled = false;
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        unsigned int nbytes;
        int err;

        err = blkcipher_walk_virt(desc, walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk->nbytes)) {
                u8 *wsrc = walk->src.virt.addr;
                u8 *wdst = walk->dst.virt.addr;

                fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);

                /* Process multi-block batch */
                if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
                        do {
                                if (enc)
                                        serpent_enc_blk_xway(ctx, wdst, wsrc);
                                else
                                        serpent_dec_blk_xway(ctx, wdst, wsrc);

                                wsrc += bsize * SERPENT_PARALLEL_BLOCKS;
                                wdst += bsize * SERPENT_PARALLEL_BLOCKS;
                                nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
                        } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

                        if (nbytes < bsize)
                                goto done;
                }

                /* Handle leftovers */
                do {
                        if (enc)
                                __serpent_encrypt(ctx, wdst, wsrc);
                        else
                                __serpent_decrypt(ctx, wdst, wsrc);

                        wsrc += bsize;
                        wdst += bsize;
                        nbytes -= bsize;
                } while (nbytes >= bsize);

done:
                err = blkcipher_walk_done(desc, walk, nbytes);
        }

        serpent_fpu_end(fpu_enabled);
        return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_crypt(desc, &walk, true);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_crypt(desc, &walk, false);
}

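/*
 * The "__"-prefixed synchronous blkciphers below are internal helpers; the
 * user-visible ecb/cbc/ctr/lrw/xts "(serpent)" algorithms registered at the
 * end of this file wrap them in cryptd-backed async transforms.
 */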
static struct crypto_alg blk_ecb_alg = {
        .cra_name               = "__ecb-serpent-sse2",
        .cra_driver_name        = "__driver-ecb-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = SERPENT_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct serpent_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE,
                        .setkey         = serpent_setkey,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
};

static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
                                  struct blkcipher_walk *walk)
{
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 *iv = (u128 *)walk->iv;

        do {
                u128_xor(dst, src, iv);
                __serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst);
                iv = dst;

                src += 1;
                dst += 1;
                nbytes -= bsize;
        } while (nbytes >= bsize);

        u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
        return nbytes;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                nbytes = __cbc_encrypt(desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

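/*
 * CBC decryption is parallelizable, so __cbc_decrypt() starts at the last
 * block and works backwards: ciphertext blocks are saved in ivs[] before the
 * in-place xway decrypt so they can still be XORed in as the chaining values
 * of the following blocks.
 */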
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
                                  struct blkcipher_walk *walk)
{
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
        u128 last_iv;
        int i;

        /* Start of the last block. */
        src += nbytes / bsize - 1;
        dst += nbytes / bsize - 1;

        last_iv = *src;

        /* Process multi-block batch */
        if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
                do {
                        nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1);
                        src -= SERPENT_PARALLEL_BLOCKS - 1;
                        dst -= SERPENT_PARALLEL_BLOCKS - 1;

                        for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
                                ivs[i] = src[i];

                        serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

                        for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
                                u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);

                        nbytes -= bsize;
                        if (nbytes < bsize)
                                goto done;

                        u128_xor(dst, dst, src - 1);
                        src -= 1;
                        dst -= 1;
                } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

                if (nbytes < bsize)
                        goto done;
        }

        /* Handle leftovers */
        for (;;) {
                __serpent_decrypt(ctx, (u8 *)dst, (u8 *)src);

                nbytes -= bsize;
                if (nbytes < bsize)
                        break;

                u128_xor(dst, dst, src - 1);
                src -= 1;
                dst -= 1;
        }

done:
        u128_xor(dst, dst, (u128 *)walk->iv);
        *(u128 *)walk->iv = last_iv;

        return nbytes;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes)) {
                fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
                nbytes = __cbc_decrypt(desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        serpent_fpu_end(fpu_enabled);
        return err;
}

static struct crypto_alg blk_cbc_alg = {
        .cra_name               = "__cbc-serpent-sse2",
        .cra_driver_name        = "__driver-cbc-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = SERPENT_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct serpent_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE,
                        .setkey         = serpent_setkey,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
};

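/*
 * CTR mode keeps the running counter as a native-endian u128 and converts it
 * to the big-endian on-the-wire format (be128) once per block; u128_inc()
 * implements the 128-bit increment with carry.
 */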
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
        dst->a = cpu_to_be64(src->a);
        dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
        dst->a = be64_to_cpu(src->a);
        dst->b = be64_to_cpu(src->b);
}

static inline void u128_inc(u128 *i)
{
        i->b++;
        if (!i->b)
                i->a++;
}

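/*
 * ctr_crypt_final() handles a trailing partial block: the counter block is
 * encrypted into a local keystream buffer and only the remaining nbytes are
 * XORed into the output, so no padding is needed.
 */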
static void ctr_crypt_final(struct blkcipher_desc *desc,
                            struct blkcipher_walk *walk)
{
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        u8 *ctrblk = walk->iv;
        u8 keystream[SERPENT_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        __serpent_encrypt(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);

        crypto_inc(ctrblk, SERPENT_BLOCK_SIZE);
}

static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 ctrblk;
        be128 ctrblocks[SERPENT_PARALLEL_BLOCKS];
        int i;

        be128_to_u128(&ctrblk, (be128 *)walk->iv);

        /* Process multi-block batch */
        if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
                do {
                        /* create ctrblks for parallel encrypt */
                        for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
                                if (dst != src)
                                        dst[i] = src[i];

                                u128_to_be128(&ctrblocks[i], &ctrblk);
                                u128_inc(&ctrblk);
                        }

                        serpent_enc_blk_xway_xor(ctx, (u8 *)dst,
                                                 (u8 *)ctrblocks);

                        src += SERPENT_PARALLEL_BLOCKS;
                        dst += SERPENT_PARALLEL_BLOCKS;
                        nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
                } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

                if (nbytes < bsize)
                        goto done;
        }

        /* Handle leftovers */
        do {
                if (dst != src)
                        *dst = *src;

                u128_to_be128(&ctrblocks[0], &ctrblk);
                u128_inc(&ctrblk);

                __serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
                u128_xor(dst, dst, (u128 *)ctrblocks);

                src += 1;
                dst += 1;
                nbytes -= bsize;
        } while (nbytes >= bsize);

done:
        u128_to_be128((be128 *)walk->iv, &ctrblk);
        return nbytes;
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                     struct scatterlist *src, unsigned int nbytes)
{
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) {
                fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
                nbytes = __ctr_crypt(desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        serpent_fpu_end(fpu_enabled);

        if (walk.nbytes) {
                ctr_crypt_final(desc, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }

        return err;
}

static struct crypto_alg blk_ctr_alg = {
        .cra_name               = "__ctr-serpent-sse2",
        .cra_driver_name        = "__driver-ctr-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_ctxsize            = sizeof(struct serpent_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE,
                        .ivsize         = SERPENT_BLOCK_SIZE,
                        .setkey         = serpent_setkey,
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
                },
        },
};

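/*
 * LRW and XTS are built on the generic lrw_crypt()/xts_crypt() helpers.
 * Those helpers call back into encrypt_callback()/decrypt_callback() with
 * chunks of at most SERPENT_PARALLEL_BLOCKS blocks; crypt_priv carries the
 * key schedule and the FPU state across those callbacks.
 */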
struct crypt_priv {
        struct serpent_ctx *ctx;
        bool fpu_enabled;
};

static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        struct crypt_priv *ctx = priv;
        int i;

        ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

        if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
                serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
                return;
        }

        for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
                __serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
        const unsigned int bsize = SERPENT_BLOCK_SIZE;
        struct crypt_priv *ctx = priv;
        int i;

        ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

        if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
                serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
                return;
        }

        for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
                __serpent_decrypt(ctx->ctx, srcdst, srcdst);
}

struct serpent_lrw_ctx {
        struct lrw_table_ctx lrw_table;
        struct serpent_ctx serpent_ctx;
};

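/*
 * An LRW key is the Serpent key followed by one cipher block
 * (SERPENT_BLOCK_SIZE bytes) of tweak material, which is consumed by
 * lrw_init_table() below.
 */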
static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
                                                        SERPENT_BLOCK_SIZE);
        if (err)
                return err;

        return lrw_init_table(&ctx->lrw_table, key + keylen -
                                                SERPENT_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->serpent_ctx,
                .fpu_enabled = false,
        };
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->serpent_ctx,
                .fpu_enabled = false,
        };
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
        struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}

static struct crypto_alg blk_lrw_alg = {
        .cra_name               = "__lrw-serpent-sse2",
        .cra_driver_name        = "__driver-lrw-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = SERPENT_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct serpent_lrw_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_lrw_alg.cra_list),
        .cra_exit               = lrw_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE +
                                          SERPENT_BLOCK_SIZE,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE +
                                          SERPENT_BLOCK_SIZE,
                        .ivsize         = SERPENT_BLOCK_SIZE,
                        .setkey         = lrw_serpent_setkey,
                        .encrypt        = lrw_encrypt,
                        .decrypt        = lrw_decrypt,
                },
        },
};

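/*
 * XTS uses two independent Serpent key schedules: crypt_ctx for the data
 * blocks and tweak_ctx for encrypting the tweak (see xts_serpent_setkey()).
 */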
struct serpent_xts_ctx {
        struct serpent_ctx tweak_ctx;
        struct serpent_ctx crypt_ctx;
};

static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        /* key consists of keys of equal size concatenated, therefore
         * the length must be even
         */
        if (keylen % 2) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* first half of xts-key is for crypt */
        err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->crypt_ctx,
                .fpu_enabled = false,
        };
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = &ctx->tweak_ctx,
                .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[SERPENT_PARALLEL_BLOCKS];
        struct crypt_priv crypt_ctx = {
                .ctx = &ctx->crypt_ctx,
                .fpu_enabled = false,
        };
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = &ctx->tweak_ctx,
                .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        serpent_fpu_end(crypt_ctx.fpu_enabled);

        return ret;
}

static struct crypto_alg blk_xts_alg = {
        .cra_name               = "__xts-serpent-sse2",
        .cra_driver_name        = "__driver-xts-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = SERPENT_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct serpent_xts_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_xts_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE * 2,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE * 2,
                        .ivsize         = SERPENT_BLOCK_SIZE,
                        .setkey         = xts_serpent_setkey,
                        .encrypt        = xts_encrypt,
                        .decrypt        = xts_decrypt,
                },
        },
};

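/*
 * Async wrappers.  The "ecb(serpent)", "cbc(serpent)", etc. algorithms below
 * are the ones users allocate.  When the FPU is usable they run the internal
 * "__driver-*" blkciphers directly; otherwise the request is copied and
 * handed to cryptd, whose worker thread performs it with the FPU available.
 */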
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int key_len)
{
        struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
        int err;

        crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
                                    & CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(child, key, key_len);
        crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
                                    & CRYPTO_TFM_RES_MASK);
        return err;
}

static int __ablk_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct blkcipher_desc desc;

        desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
        desc.info = req->info;
        desc.flags = 0;

        return crypto_blkcipher_crt(desc.tfm)->encrypt(
                &desc, req->dst, req->src, req->nbytes);
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);

                memcpy(cryptd_req, req, sizeof(*req));
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

                return crypto_ablkcipher_encrypt(cryptd_req);
        } else {
                return __ablk_encrypt(req);
        }
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);

                memcpy(cryptd_req, req, sizeof(*req));
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

                return crypto_ablkcipher_decrypt(cryptd_req);
        } else {
                struct blkcipher_desc desc;

                desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
                desc.info = req->info;
                desc.flags = 0;

                return crypto_blkcipher_crt(desc.tfm)->decrypt(
                        &desc, req->dst, req->src, req->nbytes);
        }
}

static void ablk_exit(struct crypto_tfm *tfm)
{
        struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);

        cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static void ablk_init_common(struct crypto_tfm *tfm,
                             struct cryptd_ablkcipher *cryptd_tfm)
{
        struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
                crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_ecb_alg = {
        .cra_name               = "ecb(serpent)",
        .cra_driver_name        = "ecb-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = SERPENT_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_serpent_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_cbc_alg = {
        .cra_name               = "cbc(serpent)",
        .cra_driver_name        = "cbc-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = SERPENT_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_serpent_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE,
                        .ivsize         = SERPENT_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = __ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};

static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_ctr_alg = {
        .cra_name               = "ctr(serpent)",
        .cra_driver_name        = "ctr-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_ctxsize            = sizeof(struct async_serpent_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
        .cra_init               = ablk_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE,
                        .ivsize         = SERPENT_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_encrypt,
                },
        },
};

static int ablk_lrw_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-lrw-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_lrw_alg = {
        .cra_name               = "lrw(serpent)",
        .cra_driver_name        = "lrw-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = SERPENT_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_serpent_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
        .cra_init               = ablk_lrw_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE +
                                          SERPENT_BLOCK_SIZE,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE +
                                          SERPENT_BLOCK_SIZE,
                        .ivsize         = SERPENT_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};

static int ablk_xts_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-xts-serpent-sse2", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_xts_alg = {
        .cra_name               = "xts(serpent)",
        .cra_driver_name        = "xts-serpent-sse2",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = SERPENT_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_serpent_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
        .cra_init               = ablk_xts_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = SERPENT_MIN_KEY_SIZE * 2,
                        .max_keysize    = SERPENT_MAX_KEY_SIZE * 2,
                        .ivsize         = SERPENT_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};

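/*
 * Module init: bail out if SSE2 is not available, then register the internal
 * blkciphers and the async wrappers in dependency order.  On failure the
 * cascading labels below unwind every algorithm registered so far.
 */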
static int __init serpent_sse2_init(void)
{
        int err;

        if (!cpu_has_xmm2) {
                printk(KERN_INFO "SSE2 instructions are not detected.\n");
                return -ENODEV;
        }

        err = crypto_register_alg(&blk_ecb_alg);
        if (err)
                goto blk_ecb_err;
        err = crypto_register_alg(&blk_cbc_alg);
        if (err)
                goto blk_cbc_err;
        err = crypto_register_alg(&blk_ctr_alg);
        if (err)
                goto blk_ctr_err;
        err = crypto_register_alg(&ablk_ecb_alg);
        if (err)
                goto ablk_ecb_err;
        err = crypto_register_alg(&ablk_cbc_alg);
        if (err)
                goto ablk_cbc_err;
        err = crypto_register_alg(&ablk_ctr_alg);
        if (err)
                goto ablk_ctr_err;
        err = crypto_register_alg(&blk_lrw_alg);
        if (err)
                goto blk_lrw_err;
        err = crypto_register_alg(&ablk_lrw_alg);
        if (err)
                goto ablk_lrw_err;
        err = crypto_register_alg(&blk_xts_alg);
        if (err)
                goto blk_xts_err;
        err = crypto_register_alg(&ablk_xts_alg);
        if (err)
                goto ablk_xts_err;
        return err;

        crypto_unregister_alg(&ablk_xts_alg);
ablk_xts_err:
        crypto_unregister_alg(&blk_xts_alg);
blk_xts_err:
        crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
        crypto_unregister_alg(&blk_lrw_alg);
blk_lrw_err:
        crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
        crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
        crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
        crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
        crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
        crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
        return err;
}

static void __exit serpent_sse2_exit(void)
{
        crypto_unregister_alg(&ablk_xts_alg);
        crypto_unregister_alg(&blk_xts_alg);
        crypto_unregister_alg(&ablk_lrw_alg);
        crypto_unregister_alg(&blk_lrw_alg);
        crypto_unregister_alg(&ablk_ctr_alg);
        crypto_unregister_alg(&ablk_cbc_alg);
        crypto_unregister_alg(&ablk_ecb_alg);
        crypto_unregister_alg(&blk_ctr_alg);
        crypto_unregister_alg(&blk_cbc_alg);
        crypto_unregister_alg(&blk_ecb_alg);
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");