/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */
#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"
/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
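
/*
 * These offsets are expected to match the C-side key structure (struct
 * cast5_ctx): 16 x 32-bit masking keys (km), 16 x one-byte rotation keys
 * (kr), and the rr flag, which is nonzero when the reduced 12-round
 * variant is used (keys of 80 bits or less, per RFC 2144).
 */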
/**********************************************************************
 **********************************************************************/
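
/*
 * lookup_32bit: perform the four 8-bit S-box lookups for one 32-bit
 * sub-block held in a general purpose register, two bytes at a time, and
 * combine the results with op1/op2/op3 (add/sub/xor, depending on the
 * round function type).  s1..s4 refer to the shared CAST S-box tables
 * (cast_s1..cast_s4).
 */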
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	shrq $16, src; \
	movl s1(, RID1, 4), dst ## d; \
	op1 s2(, RID2, 4), dst ## d; \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	interleave_op(il_reg); \
	op2 s3(, RID1, 4), dst ## d; \
	op3 s4(, RID2, 4), dst ## d;
#define dummy(d) /* do nothing */
#define shr_next(reg) \
	shrq $16, reg;
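
/*
 * F_head combines one input half with the round's masking key RKM (op0)
 * and rotates the result left by the round's rotation amount (RKRF holds
 * the shift count, RKRR its complement to 32), then hands the two 64-bit
 * lanes over to general purpose registers for the table lookups.  F_tail
 * runs the S-box lookups via lookup_32bit and packs the 32-bit results
 * back into a vector register.
 */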
#define F_head(a, x, gi1, gi2, op0) \
	vpslld RKRF, x, RTMP; \
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	vpinsrq $1, RFS3, x, x;
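
/*
 * F_2 evaluates the round function for two register pairs at once (RX and
 * RTMP carry the intermediate values for b1 and b2) and xors the results
 * into a1 and a2, i.e. the Feistel update a ^= F(b).
 */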
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor a1, RX, a1; \
	vpxor a2, RTMP, a2;
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
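
/*
 * The three operand sets above select the three CAST-128 round function
 * types (RFC 2144).  In scalar form, one type 1 round computes:
 *
 *	I = rol32(Km[i] + R, Kr[i]);
 *	f = ((S1[I >> 24] ^ S2[(I >> 16) & 0xff]) - S3[(I >> 8) & 0xff])
 *	    + S4[I & 0xff];
 *	L ^= f;
 *
 * Types 2 and 3 use the same structure with (^, -, +, ^) and (-, +, ^, -)
 * in place of (+, ^, -, +), matching the op0..op3 arguments of F1_2, F2_2
 * and F3_2.
 */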
#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);
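
/*
 * round: broadcast this round's masking key km[n] into RKM, extract the
 * round's rotation amount from the lowest byte of RKR (masked to 5 bits by
 * R1ST), derive the complementary shift 32 - kr into RKRR, shift RKR down
 * one byte for the next round, and apply the round function to all eight
 * registers (16 blocks).
 */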
#define round(l, r, n, f) \
	vbroadcastss (km+(4*n))(CTX), RKM; \
	vpand R1ST, RKR, RKRF; \
	vpsubq RKRF, R32, RKRR; \
	vpsrldq $1, RKR, RKR; \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
#define enc_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR;
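
/*
 * Decryption consumes the rotation keys in reverse order, so
 * dec_preload_rkr() additionally byte-reverses RKR.
 */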
#define dec_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR; \
	vpshufb .Lbswap128_mask, RKR, RKR;
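
/*
 * Each xmm register holds two 64-bit blocks.  inpack_blocks byte-swaps the
 * 32-bit words into the big-endian order CAST5 operates on, and
 * transpose_2x4 regroups a register pair so that one register ends up with
 * the four left halves and the other with the four right halves;
 * outunpack_blocks reverses the transformation on the way out.
 */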
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t1; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1;
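
/*
 * Constants: .Lbswap_mask byte-swaps each 32-bit word, .Lbswap128_mask
 * reverses all 16 bytes of a register (reversed rotation keys for
 * decryption, big-endian counters for CTR), .Lbswap_iv_mask replicates the
 * byte-reversed 64-bit IV into both lanes, and the three 4-byte constants
 * feed the round macro (the 16-rotation bias, the constant 32 and the
 * 5-bit rotation mask).
 */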
.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.section .rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
.align 16
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.section .rodata.cst4.16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16
.section .rodata.cst4.32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0
.section .rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0
.text

.align 16
__cast5_enc_blk16:
	/* input:
	 *	%rdi: ctx
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */
	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

	/* keys of 80 bits or less use only 12 rounds: skip rounds 12..15 */
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

.L__skip_enc:
	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;
ENDPROC(__cast5_enc_blk16)
.align 16
__cast5_dec_blk16:
	/* input:
	 *	%rdi: ctx
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */
	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);
	/* keys of 80 bits or less use only 12 rounds: skip rounds 15..12 */
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

.L__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;

.L__skip_dec:
	/* reduced-round variant: drop the four unused rotation-key bytes, resume at round 11 */
	vpsrldq $4, RKR, RKR;
	jmp .L__dec_tail;
ENDPROC(__cast5_dec_blk16)
ENTRY(cast5_ecb_enc_16way)
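	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst (kept in %r11 for the stores below)
	 *	%rdx: src
	 */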
	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);
ENDPROC(cast5_ecb_enc_16way)
ENTRY(cast5_ecb_dec_16way)
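	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst (kept in %r11 for the stores below)
	 *	%rdx: src
	 */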
	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);
ENDPROC(cast5_ecb_dec_16way)
ENTRY(cast5_cbc_dec_16way)
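	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst (kept in %r11)
	 *	%rdx: src (kept in %r12 for the chaining xors below)
	 */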
	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;
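
	/*
	 * CBC chaining: each decrypted block except the first is xored with
	 * the preceding ciphertext block, read back from the source buffer
	 * through %r12; the IV xor for the first block is left to the caller.
	 */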
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX;
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);
ENDPROC(cast5_cbc_dec_16way)
ENTRY(cast5_ctr_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (big endian, 64bit)
	 */
	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;
	/* load IV and byteswap */
	vmovq (%rcx), RX;
	vpshufb R1ST, RX, RX;
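
	/*
	 * Construct the counters: RTMP adds one to the low lane only (the
	 * first block keeps IV0), then RKR steps both lanes by two between
	 * register fills, and RKM byte-swaps each pair back to big endian, so
	 * RL1/RR1..RL4/RR4 end up holding the big-endian counters IV0..IV15.
	 */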
	vpsubq RTMP, RX, RX; /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR4; /* be: IV14, IV15 */
	/* store last IV */
	vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
	vmovq RX, (%rcx);
	call __cast5_enc_blk16;
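
	/* xor the encrypted counter blocks with src (%r12) and store to dst (%r11) */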
	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);
ENDPROC(cast5_ctr_16way)