/*
 * Serpent Cipher 4-way parallel algorithm (i586/SSE2)
 *
 * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Based on crypto/serpent.c by
 *  Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
 *                2003 Herbert Valerio Riedel <hvr@gnu.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
.file "serpent-sse2-i586-asm_32.S"
/**********************************************************************
  4-way SSE2 serpent
 **********************************************************************/
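/*
 * get_key(i, j, t): load word j of the 128-bit round key i from the key
 * schedule in the context (CTX) and broadcast it to all four 32-bit lanes
 * of t, so that one key word can be applied to all four blocks at once.
 */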
#define get_key(i, j, t) \
	movd (4*(i)+(j))*4(CTX), t; \
	pshufd $0, t, t;
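/*
 * K(x0..x3, x4, i): Serpent key mixing; XOR the four words of round key i
 * into the four data registers, using x4 as scratch.
 */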
#define K(x0, x1, x2, x3, x4, i) \
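/*
 * LK(..., i): Serpent linear transformation followed by mixing of round
 * key i.  The left-rotates by 13, 3, 1, 7, 5 and 22 are emulated with
 * pslld/psrld pairs, since SSE2 has no 32-bit rotate instruction.
 */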
#define LK(x0, x1, x2, x3, x4, i) \
	psrld $(32 - 13), x4; \
	psrld $(32 - 3), x4; \
	psrld $(32 - 1), x4; \
	psrld $(32 - 7), x4; \
	psrld $(32 - 5), x4; \
	psrld $(32 - 22), x4; \
	get_key(i, 0, RT0); \
	get_key(i, 2, RT0); \
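/*
 * KL(..., i): decryption counterpart of LK; mix round key i first (via K),
 * then apply the inverse linear transformation, with the right-rotates
 * again built from psrld/pslld $(32 - r) pairs.
 */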
#define KL(x0, x1, x2, x3, x4, i) \
	K(x0, x1, x2, x3, x4, i); \
	pslld $(32 - 5), x4; \
	pslld $(32 - 22), x4; \
	pslld $(32 - 1), x4; \
	pslld $(32 - 7), x4; \
	pslld $(32 - 13), x4; \
	pslld $(32 - 3), x4; \
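/*
 * S0..S7: the eight Serpent S-boxes in bit-sliced form, operating on four
 * data registers with x4 as scratch.  Each leaves its result in a permuted
 * register order, which the round sequences below follow explicitly.
 */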
#define S0(x0, x1, x2, x3, x4) \
#define S1(x0, x1, x2, x3, x4) \
#define S2(x0, x1, x2, x3, x4) \
#define S3(x0, x1, x2, x3, x4) \
#define S4(x0, x1, x2, x3, x4) \
#define S5(x0, x1, x2, x3, x4) \
#define S6(x0, x1, x2, x3, x4) \
#define S7(x0, x1, x2, x3, x4) \
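/*
 * SI0..SI7: the corresponding inverse S-boxes, used by the decryption path.
 */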
#define SI0(x0, x1, x2, x3, x4) \
#define SI1(x0, x1, x2, x3, x4) \
#define SI2(x0, x1, x2, x3, x4) \
#define SI3(x0, x1, x2, x3, x4) \
#define SI4(x0, x1, x2, x3, x4) \
#define SI5(x0, x1, x2, x3, x4) \
#define SI6(x0, x1, x2, x3, x4) \
#define SI7(x0, x1, x2, x3, x4) \
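/*
 * transpose_4x4: 4x4 32-bit matrix transpose, converting four blocks held
 * one per register into the word-sliced layout used by the round macros
 * (word 0 of all four blocks in x0, and so on), and back again.
 */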
#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
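/* Load four 16-byte blocks from 'in' (unaligned) and transpose them. */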
#define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \
	movdqu (0*4*4)(in), x0; \
	movdqu (1*4*4)(in), x1; \
	movdqu (2*4*4)(in), x2; \
	movdqu (3*4*4)(in), x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
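/* Transpose back to block order and store the four blocks to 'out'. */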
#define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	movdqu x0, (0*4*4)(out); \
	movdqu x1, (1*4*4)(out); \
	movdqu x2, (2*4*4)(out); \
	movdqu x3, (3*4*4)(out);
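/* Transpose back and XOR the four result blocks into the data at 'out'. */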
#define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	movdqu (0*4*4)(out), t0; \
	pxor t0, x0; \
	movdqu x0, (0*4*4)(out); \
	movdqu (1*4*4)(out), t0; \
	pxor t0, x1; \
	movdqu x1, (1*4*4)(out); \
	movdqu (2*4*4)(out), t0; \
	pxor t0, x2; \
	movdqu x2, (2*4*4)(out); \
	movdqu (3*4*4)(out), t0; \
	pxor t0, x3; \
	movdqu x3, (3*4*4)(out);
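/*
 * __serpent_enc_blk_4way: encrypt four 16-byte blocks in parallel.
 * Arguments (ctx, dst, src, xor) are read from the stack; when the xor
 * flag is non-zero the result is XORed into dst instead of stored.
 */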
.global __serpent_enc_blk_4way
.type __serpent_enc_blk_4way,@function;

__serpent_enc_blk_4way:
	/* input:
	 *	arg_ctx(%esp): ctx, CTX
	 *	arg_dst(%esp): dst
	 *	arg_src(%esp): src
	 *	arg_xor(%esp): bool, if true: xor output
	 */
	movl arg_ctx(%esp), CTX;

	movl arg_src(%esp), %eax;
	read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
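	/*
	 * 32 rounds: key mixing, bit-sliced S-box (S0..S7 repeating), then
	 * the linear transformation merged with the next key mixing in LK.
	 * The register arguments rotate from round to round to track where
	 * each macro leaves its results.  The final K(..., 32) applies the
	 * last round key in place of the linear transformation.
	 */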
	K(RA, RB, RC, RD, RE, 0);
	S0(RA, RB, RC, RD, RE); LK(RC, RB, RD, RA, RE, 1);
	S1(RC, RB, RD, RA, RE); LK(RE, RD, RA, RC, RB, 2);
	S2(RE, RD, RA, RC, RB); LK(RB, RD, RE, RC, RA, 3);
	S3(RB, RD, RE, RC, RA); LK(RC, RA, RD, RB, RE, 4);
	S4(RC, RA, RD, RB, RE); LK(RA, RD, RB, RE, RC, 5);
	S5(RA, RD, RB, RE, RC); LK(RC, RA, RD, RE, RB, 6);
	S6(RC, RA, RD, RE, RB); LK(RD, RB, RA, RE, RC, 7);
	S7(RD, RB, RA, RE, RC); LK(RC, RA, RE, RD, RB, 8);
	S0(RC, RA, RE, RD, RB); LK(RE, RA, RD, RC, RB, 9);
	S1(RE, RA, RD, RC, RB); LK(RB, RD, RC, RE, RA, 10);
	S2(RB, RD, RC, RE, RA); LK(RA, RD, RB, RE, RC, 11);
	S3(RA, RD, RB, RE, RC); LK(RE, RC, RD, RA, RB, 12);
	S4(RE, RC, RD, RA, RB); LK(RC, RD, RA, RB, RE, 13);
	S5(RC, RD, RA, RB, RE); LK(RE, RC, RD, RB, RA, 14);
	S6(RE, RC, RD, RB, RA); LK(RD, RA, RC, RB, RE, 15);
	S7(RD, RA, RC, RB, RE); LK(RE, RC, RB, RD, RA, 16);
	S0(RE, RC, RB, RD, RA); LK(RB, RC, RD, RE, RA, 17);
	S1(RB, RC, RD, RE, RA); LK(RA, RD, RE, RB, RC, 18);
	S2(RA, RD, RE, RB, RC); LK(RC, RD, RA, RB, RE, 19);
	S3(RC, RD, RA, RB, RE); LK(RB, RE, RD, RC, RA, 20);
	S4(RB, RE, RD, RC, RA); LK(RE, RD, RC, RA, RB, 21);
	S5(RE, RD, RC, RA, RB); LK(RB, RE, RD, RA, RC, 22);
	S6(RB, RE, RD, RA, RC); LK(RD, RC, RE, RA, RB, 23);
	S7(RD, RC, RE, RA, RB); LK(RB, RE, RA, RD, RC, 24);
	S0(RB, RE, RA, RD, RC); LK(RA, RE, RD, RB, RC, 25);
	S1(RA, RE, RD, RB, RC); LK(RC, RD, RB, RA, RE, 26);
	S2(RC, RD, RB, RA, RE); LK(RE, RD, RC, RA, RB, 27);
	S3(RE, RD, RC, RA, RB); LK(RA, RB, RD, RE, RC, 28);
	S4(RA, RB, RD, RE, RC); LK(RB, RD, RE, RC, RA, 29);
	S5(RB, RD, RE, RC, RA); LK(RA, RB, RD, RC, RE, 30);
	S6(RA, RB, RD, RC, RE); LK(RD, RE, RB, RC, RA, 31);
	S7(RD, RE, RB, RC, RA); K(RA, RB, RC, RD, RE, 32);
	movl arg_dst(%esp), %eax;
	cmpb $0, arg_xor(%esp);
	jnz __enc_xor4;
	write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
	ret;
__enc_xor4:
	xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
	ret;
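/*
 * serpent_dec_blk_4way: decrypt four 16-byte blocks in parallel.
 * Arguments (ctx, dst, src) are read from the stack.
 */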
.global serpent_dec_blk_4way
.type serpent_dec_blk_4way,@function;

serpent_dec_blk_4way:
	/* input:
	 *	arg_ctx(%esp): ctx, CTX
	 *	arg_dst(%esp): dst
	 *	arg_src(%esp): src
	 */
	movl arg_ctx(%esp), CTX;

	movl arg_src(%esp), %eax;
	read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
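	/*
	 * Decryption runs the rounds in reverse: mix round key 32, then for
	 * each round apply the inverse S-box (SI7..SI0 repeating) and KL,
	 * which mixes the round key before undoing the linear transformation.
	 * The final K(..., 0) removes the first round key.
	 */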
	K(RA, RB, RC, RD, RE, 32);
	SI7(RA, RB, RC, RD, RE); KL(RB, RD, RA, RE, RC, 31);
	SI6(RB, RD, RA, RE, RC); KL(RA, RC, RE, RB, RD, 30);
	SI5(RA, RC, RE, RB, RD); KL(RC, RD, RA, RE, RB, 29);
	SI4(RC, RD, RA, RE, RB); KL(RC, RA, RB, RE, RD, 28);
	SI3(RC, RA, RB, RE, RD); KL(RB, RC, RD, RE, RA, 27);
	SI2(RB, RC, RD, RE, RA); KL(RC, RA, RE, RD, RB, 26);
	SI1(RC, RA, RE, RD, RB); KL(RB, RA, RE, RD, RC, 25);
	SI0(RB, RA, RE, RD, RC); KL(RE, RC, RA, RB, RD, 24);
	SI7(RE, RC, RA, RB, RD); KL(RC, RB, RE, RD, RA, 23);
	SI6(RC, RB, RE, RD, RA); KL(RE, RA, RD, RC, RB, 22);
	SI5(RE, RA, RD, RC, RB); KL(RA, RB, RE, RD, RC, 21);
	SI4(RA, RB, RE, RD, RC); KL(RA, RE, RC, RD, RB, 20);
	SI3(RA, RE, RC, RD, RB); KL(RC, RA, RB, RD, RE, 19);
	SI2(RC, RA, RB, RD, RE); KL(RA, RE, RD, RB, RC, 18);
	SI1(RA, RE, RD, RB, RC); KL(RC, RE, RD, RB, RA, 17);
	SI0(RC, RE, RD, RB, RA); KL(RD, RA, RE, RC, RB, 16);
	SI7(RD, RA, RE, RC, RB); KL(RA, RC, RD, RB, RE, 15);
	SI6(RA, RC, RD, RB, RE); KL(RD, RE, RB, RA, RC, 14);
	SI5(RD, RE, RB, RA, RC); KL(RE, RC, RD, RB, RA, 13);
	SI4(RE, RC, RD, RB, RA); KL(RE, RD, RA, RB, RC, 12);
	SI3(RE, RD, RA, RB, RC); KL(RA, RE, RC, RB, RD, 11);
	SI2(RA, RE, RC, RB, RD); KL(RE, RD, RB, RC, RA, 10);
	SI1(RE, RD, RB, RC, RA); KL(RA, RD, RB, RC, RE, 9);
	SI0(RA, RD, RB, RC, RE); KL(RB, RE, RD, RA, RC, 8);
	SI7(RB, RE, RD, RA, RC); KL(RE, RA, RB, RC, RD, 7);
	SI6(RE, RA, RB, RC, RD); KL(RB, RD, RC, RE, RA, 6);
	SI5(RB, RD, RC, RE, RA); KL(RD, RA, RB, RC, RE, 5);
	SI4(RD, RA, RB, RC, RE); KL(RD, RB, RE, RC, RA, 4);
	SI3(RD, RB, RE, RC, RA); KL(RE, RD, RA, RC, RB, 3);
	SI2(RE, RD, RA, RC, RB); KL(RD, RB, RC, RA, RE, 2);
	SI1(RD, RB, RC, RA, RE); KL(RE, RB, RC, RA, RD, 1);
	SI0(RE, RB, RC, RA, RD); K(RC, RD, RB, RE, RA, 0);
	movl arg_dst(%esp), %eax;
	write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);

	ret;