/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
.file "cast5-avx-x86_64-asm_64.S"

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
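/*
 * km: 16 x 32-bit masking subkeys, kr: 16 x 5-bit rotation subkeys (one byte
 * each), rr: nonzero for short (<= 80-bit) keys, which use 12 rounds instead
 * of 16 (RFC 2144).
 */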
/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;
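/*
 * Look up one packed 32-bit value from src: two S-box indices come from
 * bh/bl, then src is shifted right by 16 for the other two.  The four
 * s1..s4 results are folded into dst with op1..op3.  Passing shr_next as
 * interleave_op shifts src a further 16 bits mid-sequence so the following
 * lookup_32bit call finds the next packed value already in place; the last
 * call in a group passes dummy instead.
 */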
#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

#define F_head(a, x, gi1, gi2, op0) \
	vpslld RKRF, x, RTMP; \

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	vpinsrq $1, RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
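/*
 * The three CAST5 round function types (RFC 2144), with I = ((Km op0 D) <<< Kr)
 * split into bytes Ia..Id from most to least significant:
 *   f1 = ((s1[Ia] ^ s2[Ib]) - s3[Ic]) + s4[Id]	(op0 = add)
 *   f2 = ((s1[Ia] - s2[Ib]) + s3[Ic]) ^ s4[Id]	(op0 = xor)
 *   f3 = ((s1[Ia] + s2[Ib]) ^ s3[Ic]) - s4[Id]	(op0 = sub)
 * op1..op3 passed to F_2 above are the scalar forms of these combining ops.
 */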
#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);

#define round(l, r, n, f) \
	vbroadcastss	(km+(4*n))(CTX), RKM; \
	vpand		R1ST, RKR, RKRF; \
	vpsubq		RKRF, R32, RKRR; \
	vpsrldq $1,	RKR, RKR; \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
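/*
 * One CAST5 round over all 16 blocks: Km[n] is broadcast to every dword lane,
 * the current rotation amount Kr[n] is masked out of the low byte of RKR into
 * RKRF, and RKRR = 32 - Kr[n] so F_head can implement the data-dependent
 * rotate as (x << Kr) | (x >> (32 - Kr)).  vpsrldq $1 then exposes the next
 * Kr byte.  Each subround handles two register pairs (eight blocks).
 */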
#define enc_preload_rkr() \
	vbroadcastss	.L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss	.L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX), RKR, RKR; \
	vpshufb		.Lbswap128_mask, RKR, RKR;
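/*
 * For 5-bit rotation amounts, xoring with 16 equals adding 16 mod 32 (only
 * bit 4 toggles); the extra rotation by 16 swaps the 16-bit halves of the
 * rotated value ahead of the S-box lookups.  dec_preload_rkr additionally
 * byte-reverses the Kr array because decryption consumes the rounds in
 * reverse order.
 */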
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq	x1, x0, t0; \
	vpunpckhdq	x1, x0, t1; \
	vpunpcklqdq	t1, t0, x0; \
	vpunpckhqdq	t1, t0, x1;
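/*
 * Dword transpose: afterwards x0 holds dwords 0 and 2 of both inputs and x1
 * holds dwords 1 and 3.  Combined with the per-dword byteswap in
 * inpack_blocks this gathers the 32-bit left halves of four blocks into one
 * register and the right halves into the other.
 */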
#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask,	x0, x0; \
	vpshufb rmask,	x1, x1; \
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	vpshufb rmask,	x0, x0; \
	vpshufb rmask,	x1, x1;
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.type	__cast5_enc_blk16,@function;

__cast5_enc_blk16:
	/* input:
	 *	%rdi: ctx
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */
	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);
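	/* each RLx now holds the 32-bit left halves of four blocks and each
	 * RRx the corresponding right halves, so every vector instruction
	 * below operates on four blocks at a time */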
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

	movzbl rr(CTX), %eax;
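	/* rr != 0 selects the reduced 12-round schedule (keys <= 80 bits);
	 * rounds 12..15 below are skipped in that case */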
	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);
	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
.type	__cast5_dec_blk16,@function;

__cast5_dec_blk16:
	/* input:
	 *	%rdi: ctx
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */
	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);
	movzbl rr(CTX), %eax;
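	/* rr != 0: the 12-round schedule has no rounds 12..15, so decryption
	 * starts at round 11 and the four unused Kr bytes are discarded (the
	 * vpsrldq $4 further down) */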
	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
	/* reduced-round path: drop the Kr bytes of the four skipped rounds */
	vpsrldq $4, RKR, RKR;
.global cast5_ecb_enc_16way
.type	cast5_ecb_enc_16way,@function;

cast5_ecb_enc_16way:
	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;
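	/* the Feistel half-swap after the final round is folded into the
	 * output order: RRx is written before RLx */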
	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);
.global cast5_ecb_dec_16way
.type	cast5_ecb_dec_16way,@function;

cast5_ecb_dec_16way:
	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);
.global cast5_cbc_dec_16way
.type	cast5_cbc_dec_16way,@function;

cast5_cbc_dec_16way:
	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;
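	/* CBC: each plaintext block gets xored with the preceding ciphertext
	 * block.  The vpshufd below lines the first ciphertext block up with
	 * block 2 in the high half of RR1, and the offset-8 loads feed
	 * ciphertext blocks 2..15 to output blocks 3..16; the IV xor for the
	 * very first block is not done here. */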
	vpshufd $0x4f, RX, RX;

	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);
.global cast5_ctr_16way
.type	cast5_ctr_16way,@function;

cast5_ctr_16way:
	/* input:
	 *	%rcx: iv (big endian, 64bit)
	 */

	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
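	/* counters are kept as little-endian 64-bit values: subtracting RTMP
	 * (-1 in the low lane) increments the first counter, and subtracting
	 * RKR (-2 in both lanes) advances a packed pair of counters by two;
	 * vpshufb with RKM converts each pair back into big-endian blocks */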
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;

	/* load IV and byteswap */
	vpshufb R1ST, RX, RX;

	vpsubq RTMP, RX, RX;  /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
	vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
	vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
	vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
	vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
	vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
	vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

	vpsubq RTMP, RX, RX;  /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
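	/* RX now holds counter value 16 in big-endian form, ready to be
	 * written back through %rcx as the updated IV */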
	call __cast5_enc_blk16;

	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);