2 * Serpent Cipher 4-way parallel algorithm (i586/SSE2)
4 * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
6 * Based on crypto/serpent.c by
7 * Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
8 * 2003 Herbert Valerio Riedel <hvr@gnu.org>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
27 .file "serpent-sse2-i586-asm_32.S"
35 /**********************************************************************
37 **********************************************************************/
/*
 * get_key(i, j, t): load 32-bit word j (0..3) of the expanded key for
 * round i from the key schedule pointed to by CTX into register t.
 * Each round owns four consecutive key words, hence offset (4*i+j)*4.
 */
51 #define get_key(i, j, t) \
52 movd (4*(i)+(j))*4(CTX), t; \
/* K(x0..x3, x4, i): mix the four round-i subkey words into the data slices. */
55 #define K(x0, x1, x2, x3, x4, i) \
/*
 * LK(x0..x3, x4, i): Serpent forward linear transformation followed by
 * mixing of round key i (encryption direction).  Each psrld $(32 - n)
 * visible below is one half of a 32-bit left-rotate by n; the matching
 * pslld $n and the por/pxor merges are adjacent in the full macro body.
 */
65 #define LK(x0, x1, x2, x3, x4, i) \
68 psrld $(32 - 13), x4; /* half of rotl32(.., 13) */ \
73 psrld $(32 - 3), x4; /* half of rotl32(.., 3) */ \
78 psrld $(32 - 1), x4; /* half of rotl32(.., 1) */ \
86 psrld $(32 - 7), x4; /* half of rotl32(.., 7) */ \
100 psrld $(32 - 5), x4; /* half of rotl32(.., 5) */ \
104 psrld $(32 - 22), x4; /* half of rotl32(.., 22) */ \
106 get_key(i, 0, RT0); /* fetch key word 0 of round i */ \
108 get_key(i, 2, RT0); /* fetch key word 2 of round i */ \
/*
 * KL(x0..x3, x4, i): key mixing of round key i followed by the inverse
 * linear transformation (decryption direction).  Each pslld $(32 - n)
 * visible below is one half of a 32-bit right-rotate by n; the matching
 * psrld $n and the por/pxor merges are adjacent in the full macro body.
 */
111 #define KL(x0, x1, x2, x3, x4, i) \
112 K(x0, x1, x2, x3, x4, i); /* on decrypt, key is mixed in first */ \
115 pslld $(32 - 5), x4; /* half of rotr32(.., 5) */ \
119 pslld $(32 - 22), x4; /* half of rotr32(.., 22) */ \
129 pslld $(32 - 1), x4; /* half of rotr32(.., 1) */ \
133 pslld $(32 - 7), x4; /* half of rotr32(.., 7) */ \
141 pslld $(32 - 13), x4; /* half of rotr32(.., 13) */ \
147 pslld $(32 - 3), x4; /* half of rotr32(.., 3) */ \
/*
 * S0..S7: the eight Serpent S-boxes, each transforming the four 32-bit
 * data slices x0..x3 in place, with x4 as scratch.  The permuted output
 * ordering of each S-box is absorbed by the argument order at the call
 * sites in the round sequences below.  (Bodies not shown in this
 * excerpt.)
 */
150 #define S0(x0, x1, x2, x3, x4) \
169 #define S1(x0, x1, x2, x3, x4) \
189 #define S2(x0, x1, x2, x3, x4) \
208 #define S3(x0, x1, x2, x3, x4) \
229 #define S4(x0, x1, x2, x3, x4) \
248 #define S5(x0, x1, x2, x3, x4) \
267 #define S6(x0, x1, x2, x3, x4) \
286 #define S7(x0, x1, x2, x3, x4) \
/*
 * SI0..SI7: the matching inverse S-boxes, used on the decryption path.
 */
307 #define SI0(x0, x1, x2, x3, x4) \
326 #define SI1(x0, x1, x2, x3, x4) \
345 #define SI2(x0, x1, x2, x3, x4) \
364 #define SI3(x0, x1, x2, x3, x4) \
384 #define SI4(x0, x1, x2, x3, x4) \
404 #define SI5(x0, x1, x2, x3, x4) \
426 #define SI6(x0, x1, x2, x3, x4) \
445 #define SI7(x0, x1, x2, x3, x4) \
/*
 * transpose_4x4: transpose a 4x4 matrix of 32-bit words held in x0..x3
 * (t0..t2 are scratch), converting between "four consecutive blocks"
 * layout and the word-sliced layout the round macros operate on.
 */
466 #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
/*
 * read_blocks(in, ...): load four 16-byte blocks from 'in' (unaligned
 * loads, movdqu) and transpose them so that xN holds the Nth 32-bit
 * word of every block — the 4-way parallel form used by the rounds.
 */
481 #define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \
482 movdqu (0*4*4)(in), x0; \
483 movdqu (1*4*4)(in), x1; \
484 movdqu (2*4*4)(in), x2; \
485 movdqu (3*4*4)(in), x3; \
487 transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
/*
 * write_blocks(out, ...): transpose the word-sliced state back into
 * block order and store four 16-byte blocks to 'out' (unaligned
 * stores, movdqu).
 */
489 #define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
490 transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
492 movdqu x0, (0*4*4)(out); \
493 movdqu x1, (1*4*4)(out); \
494 movdqu x2, (2*4*4)(out); \
495 movdqu x3, (3*4*4)(out);
/*
 * xor_blocks(out, ...): like write_blocks, but each 16-byte lane of
 * 'out' is first loaded into t0 and combined with the cipher result
 * before being stored back (read-modify-write).  The combining ops sit
 * between each load/store pair in the full macro; given the name and
 * the "xor output" flag in the encrypt routine they are presumably
 * pxor — confirm against the full file.
 */
497 #define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
498 transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
500 movdqu (0*4*4)(out), t0; \
502 movdqu x0, (0*4*4)(out); \
503 movdqu (1*4*4)(out), t0; \
505 movdqu x1, (1*4*4)(out); \
506 movdqu (2*4*4)(out), t0; \
508 movdqu x2, (2*4*4)(out); \
509 movdqu (3*4*4)(out), t0; \
511 movdqu x3, (3*4*4)(out);
/*
 * __serpent_enc_blk_4way: encrypt four 16-byte Serpent blocks in
 * parallel using SSE2.  i386 calling convention: all arguments are
 * read from the stack via the arg_* offsets.
 */
514 .global __serpent_enc_blk_4way
515 .type __serpent_enc_blk_4way,@function;
517 __serpent_enc_blk_4way:
/*
 * input:
519 * arg_ctx(%esp): ctx, CTX
 * arg_dst(%esp): dst (result written/xored here, see below)
 * arg_src(%esp): src (4 x 16 bytes of plaintext)
522 * arg_xor(%esp): bool, if true: xor output
 */
527 movl arg_ctx(%esp), CTX;
529 movl arg_src(%esp), %eax;
/* Load the four input blocks and transpose into word-sliced form. */
530 read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
/*
 * 32 encryption rounds: round i applies S-box S(i mod 8) then the
 * linear transform + key mixing (LK) with round key i+1.  Register
 * roles rotate each round, hence the shifting argument orders; the
 * final round replaces LK with a plain mix of round key 32.
 */
532 K(RA, RB, RC, RD, RE, 0);
533 S0(RA, RB, RC, RD, RE); LK(RC, RB, RD, RA, RE, 1);
534 S1(RC, RB, RD, RA, RE); LK(RE, RD, RA, RC, RB, 2);
535 S2(RE, RD, RA, RC, RB); LK(RB, RD, RE, RC, RA, 3);
536 S3(RB, RD, RE, RC, RA); LK(RC, RA, RD, RB, RE, 4);
537 S4(RC, RA, RD, RB, RE); LK(RA, RD, RB, RE, RC, 5);
538 S5(RA, RD, RB, RE, RC); LK(RC, RA, RD, RE, RB, 6);
539 S6(RC, RA, RD, RE, RB); LK(RD, RB, RA, RE, RC, 7);
540 S7(RD, RB, RA, RE, RC); LK(RC, RA, RE, RD, RB, 8);
541 S0(RC, RA, RE, RD, RB); LK(RE, RA, RD, RC, RB, 9);
542 S1(RE, RA, RD, RC, RB); LK(RB, RD, RC, RE, RA, 10);
543 S2(RB, RD, RC, RE, RA); LK(RA, RD, RB, RE, RC, 11);
544 S3(RA, RD, RB, RE, RC); LK(RE, RC, RD, RA, RB, 12);
545 S4(RE, RC, RD, RA, RB); LK(RC, RD, RA, RB, RE, 13);
546 S5(RC, RD, RA, RB, RE); LK(RE, RC, RD, RB, RA, 14);
547 S6(RE, RC, RD, RB, RA); LK(RD, RA, RC, RB, RE, 15);
548 S7(RD, RA, RC, RB, RE); LK(RE, RC, RB, RD, RA, 16);
549 S0(RE, RC, RB, RD, RA); LK(RB, RC, RD, RE, RA, 17);
550 S1(RB, RC, RD, RE, RA); LK(RA, RD, RE, RB, RC, 18);
551 S2(RA, RD, RE, RB, RC); LK(RC, RD, RA, RB, RE, 19);
552 S3(RC, RD, RA, RB, RE); LK(RB, RE, RD, RC, RA, 20);
553 S4(RB, RE, RD, RC, RA); LK(RE, RD, RC, RA, RB, 21);
554 S5(RE, RD, RC, RA, RB); LK(RB, RE, RD, RA, RC, 22);
555 S6(RB, RE, RD, RA, RC); LK(RD, RC, RE, RA, RB, 23);
556 S7(RD, RC, RE, RA, RB); LK(RB, RE, RA, RD, RC, 24);
557 S0(RB, RE, RA, RD, RC); LK(RA, RE, RD, RB, RC, 25);
558 S1(RA, RE, RD, RB, RC); LK(RC, RD, RB, RA, RE, 26);
559 S2(RC, RD, RB, RA, RE); LK(RE, RD, RC, RA, RB, 27);
560 S3(RE, RD, RC, RA, RB); LK(RA, RB, RD, RE, RC, 28);
561 S4(RA, RB, RD, RE, RC); LK(RB, RD, RE, RC, RA, 29);
562 S5(RB, RD, RE, RC, RA); LK(RA, RB, RD, RC, RE, 30);
563 S6(RA, RB, RD, RC, RE); LK(RD, RE, RB, RC, RA, 31);
564 S7(RD, RE, RB, RC, RA); K(RA, RB, RC, RD, RE, 32);
566 movl arg_dst(%esp), %eax;
/*
 * Select output mode from the bool flag: plain store, or xor into the
 * bytes already at dst.  NOTE(review): the conditional branch, rets
 * and the label separating the two paths fall between these lines in
 * the full file and are not visible in this excerpt.
 */
568 cmpb $0, arg_xor(%esp);
/* plain-store path */
571 write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
/* xor-output path (taken when arg_xor is non-zero) */
576 xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
/*
 * serpent_dec_blk_4way: decrypt four 16-byte Serpent blocks in
 * parallel using SSE2.  i386 calling convention: arguments are read
 * from the stack via the arg_* offsets (ctx, dst, src — no xor flag).
 */
581 .global serpent_dec_blk_4way
582 .type serpent_dec_blk_4way,@function;
584 serpent_dec_blk_4way:
/*
 * input:
586 * arg_ctx(%esp): ctx, CTX
 * arg_dst(%esp): dst, arg_src(%esp): src (see the encrypt routine)
 */
593 movl arg_ctx(%esp), CTX;
595 movl arg_src(%esp), %eax;
/* Load the four ciphertext blocks and transpose into sliced form. */
596 read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
/*
 * Exact inverse of encryption: mix round key 32 first, then run the
 * rounds backwards — inverse S-box SI(i mod 8) followed by key mixing
 * + inverse linear transform (KL) with round key i, finishing with a
 * plain mix of round key 0.
 */
598 K(RA, RB, RC, RD, RE, 32);
599 SI7(RA, RB, RC, RD, RE); KL(RB, RD, RA, RE, RC, 31);
600 SI6(RB, RD, RA, RE, RC); KL(RA, RC, RE, RB, RD, 30);
601 SI5(RA, RC, RE, RB, RD); KL(RC, RD, RA, RE, RB, 29);
602 SI4(RC, RD, RA, RE, RB); KL(RC, RA, RB, RE, RD, 28);
603 SI3(RC, RA, RB, RE, RD); KL(RB, RC, RD, RE, RA, 27);
604 SI2(RB, RC, RD, RE, RA); KL(RC, RA, RE, RD, RB, 26);
605 SI1(RC, RA, RE, RD, RB); KL(RB, RA, RE, RD, RC, 25);
606 SI0(RB, RA, RE, RD, RC); KL(RE, RC, RA, RB, RD, 24);
607 SI7(RE, RC, RA, RB, RD); KL(RC, RB, RE, RD, RA, 23);
608 SI6(RC, RB, RE, RD, RA); KL(RE, RA, RD, RC, RB, 22);
609 SI5(RE, RA, RD, RC, RB); KL(RA, RB, RE, RD, RC, 21);
610 SI4(RA, RB, RE, RD, RC); KL(RA, RE, RC, RD, RB, 20);
611 SI3(RA, RE, RC, RD, RB); KL(RC, RA, RB, RD, RE, 19);
612 SI2(RC, RA, RB, RD, RE); KL(RA, RE, RD, RB, RC, 18);
613 SI1(RA, RE, RD, RB, RC); KL(RC, RE, RD, RB, RA, 17);
614 SI0(RC, RE, RD, RB, RA); KL(RD, RA, RE, RC, RB, 16);
615 SI7(RD, RA, RE, RC, RB); KL(RA, RC, RD, RB, RE, 15);
616 SI6(RA, RC, RD, RB, RE); KL(RD, RE, RB, RA, RC, 14);
617 SI5(RD, RE, RB, RA, RC); KL(RE, RC, RD, RB, RA, 13);
618 SI4(RE, RC, RD, RB, RA); KL(RE, RD, RA, RB, RC, 12);
619 SI3(RE, RD, RA, RB, RC); KL(RA, RE, RC, RB, RD, 11);
620 SI2(RA, RE, RC, RB, RD); KL(RE, RD, RB, RC, RA, 10);
621 SI1(RE, RD, RB, RC, RA); KL(RA, RD, RB, RC, RE, 9);
622 SI0(RA, RD, RB, RC, RE); KL(RB, RE, RD, RA, RC, 8);
623 SI7(RB, RE, RD, RA, RC); KL(RE, RA, RB, RC, RD, 7);
624 SI6(RE, RA, RB, RC, RD); KL(RB, RD, RC, RE, RA, 6);
625 SI5(RB, RD, RC, RE, RA); KL(RD, RA, RB, RC, RE, 5);
626 SI4(RD, RA, RB, RC, RE); KL(RD, RB, RE, RC, RA, 4);
627 SI3(RD, RB, RE, RC, RA); KL(RE, RD, RA, RC, RB, 3);
628 SI2(RE, RD, RA, RC, RB); KL(RD, RB, RC, RA, RE, 2);
629 SI1(RD, RB, RC, RA, RE); KL(RE, RB, RC, RA, RD, 1);
630 SI0(RE, RB, RC, RA, RD); K(RC, RD, RB, RE, RA, 0);
632 movl arg_dst(%esp), %eax;
/* Transpose back to block order and store the recovered plaintext
 * (final state lives in RC, RD, RB, RE after the last SI0/K). */
633 write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);