/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 */

#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
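/*
 * These offsets mirror struct cast5_ctx: 16 u32 masking keys Km[], then
 * 16 rotation bytes Kr[], then the rr flag that selects the reduced
 * 12-round variant used for keys of 80 bits or less.
 */
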
/**********************************************************************
  16-way AVX cast5
 **********************************************************************/

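/*
 * lookup_32bit: four table lookups into the 256-entry 32-bit CAST s-boxes
 * s1..s4, two bytes at a time from the low 16 bits of 'src', combined into
 * 'dst' with the round-type specific operations op1/op2/op3.  interleave_op
 * is either dummy() or shr_next(), the latter advancing another register to
 * its next 16 bits between lookups.
 */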
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl src ## bh, RID1d; \
        leaq s1(%rip), RID2; \
        movl (RID2,RID1,4), dst ## d; \
        movzbl src ## bl, RID2d; \
        leaq s2(%rip), RID1; \
        op1 (RID1,RID2,4), dst ## d; \
        shrq $16, src; \
        movzbl src ## bh, RID1d; \
        leaq s3(%rip), RID2; \
        op2 (RID2,RID1,4), dst ## d; \
        movzbl src ## bl, RID2d; \
        interleave_op(il_reg); \
        leaq s4(%rip), RID1; \
        op3 (RID1,RID2,4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16, reg;

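/*
 * F_head computes the round's 32-bit intermediate I = (Km op0 data), rotated
 * left by the per-round Kr amount (RKRF holds kr, RKRR holds 32-kr), for all
 * four lanes at once, and spills the result to the general purpose registers
 * gi1/gi2 for the table lookups.  F_tail performs the s-box lookups for both
 * halves and packs the two 32-bit results back into an xmm register.
 */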
#define F_head(a, x, gi1, gi2, op0) \
        op0 a, RKM, x; \
        vpslld RKRF, x, RTMP; \
        vpsrld RKRR, x, x; \
        vpor RTMP, x, x; \
        \
        vmovq x, gi1; \
        vpextrq $1, x, gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
        lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
        lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
        \
        lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
        shlq $32, RFS2; \
        orq RFS1, RFS2; \
        lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
        shlq $32, RFS1; \
        orq RFS1, RFS3; \
        \
        vmovq RFS2, x; \
        vpinsrq $1, RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
        F_head(b1, RX, RGI1, RGI2, op0); \
        F_head(b2, RX, RGI3, RGI4, op0); \
        \
        F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
        F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
        \
        vpxor a1, RX, a1; \
        vpxor a2, RTMP, a2;

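/*
 * The three CAST5 round function types these macros vectorize, in scalar
 * form (RFC 2144, with Ia..Id the bytes of I from most to least
 * significant):
 *
 *      F1: I = rol32(Km + D, Kr); f = ((s1[Ia] ^ s2[Ib]) - s3[Ic]) + s4[Id]
 *      F2: I = rol32(Km ^ D, Kr); f = ((s1[Ia] - s2[Ib]) + s3[Ic]) ^ s4[Id]
 *      F3: I = rol32(Km - D, Kr); f = ((s1[Ia] + s2[Ib]) ^ s3[Ic]) - s4[Id]
 *
 * F_2 applies one of these to two register pairs (eight blocks) at a time;
 * the result f is xored into the opposite half (the vpxor a1/a2 above).
 */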
#define F1_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
        F ## f ## _2(a1, b1, a2, b2);

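/*
 * One CAST5 round over all 16 blocks: broadcast masking key Km[n] into RKM,
 * mask the lowest byte of RKR down to a 5-bit rotate count (RKRF) and
 * compute 32-RKRF (RKRR), shift RKR down a byte so the next round sees the
 * next Kr entry, then run the type-f subround on all four register pairs.
 */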
#define round(l, r, n, f) \
        vbroadcastss (km+(4*n))(CTX), RKM; \
        vpand R1ST, RKR, RKRF; \
        vpsubq RKRF, R32, RKRR; \
        vpsrldq $1, RKR, RKR; \
        subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
        subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

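/*
 * RKR caches all 16 Kr rotation bytes at once; round() consumes one byte per
 * round via the vpsrldq above.  Each byte is pre-xored with 16, which for a
 * 5-bit rotate count is the same as adding 16 mod 32.  The decryption
 * variant additionally byte-reverses the vector so the Kr bytes come out in
 * reverse round order, matching the reversed Km indices used by
 * __cast5_dec_blk16.
 */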
#define enc_preload_rkr() \
        vbroadcastss .L16_mask(%rip), RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
        vbroadcastss .L16_mask(%rip), RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor kr(CTX), RKR, RKR; \
        vpshufb .Lbswap128_mask(%rip), RKR, RKR;

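/*
 * 2x4 dword transpose: gathers dwords 0 and 2 of both inputs into x0 and
 * dwords 1 and 3 into x1.  Used below so that one xmm register ends up
 * holding the four left 32-bit halves of four blocks and its partner the
 * four right halves.
 */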
#define transpose_2x4(x0, x1, t0, t1) \
        vpunpckldq x1, x0, t0; \
        vpunpckhdq x1, x0, t1; \
        \
        vpunpcklqdq t1, t0, x0; \
        vpunpckhqdq t1, t0, x1;

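/*
 * CAST5 works on big-endian 32-bit words: inpack_blocks byte-swaps each
 * dword (rmask = .Lbswap_mask at the call sites) and then transposes two
 * registers into left-halves/right-halves form; outunpack_blocks undoes
 * both steps.
 */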
#define inpack_blocks(x0, x1, t0, t1, rmask) \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        \
        transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
        transpose_2x4(x0, x1, t0, t1) \
        \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1;

.section	.rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.section	.rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.section	.rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
.align 16
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.section	.rodata.cst4.16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16
.section	.rodata.cst4.32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0
.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
        /* input:
         *      %rdi: ctx
         *      RL1: blocks 1 and 2
         *      RR1: blocks 3 and 4
         *      RL2: blocks 5 and 6
         *      RR2: blocks 7 and 8
         *      RL3: blocks 9 and 10
         *      RR3: blocks 11 and 12
         *      RL4: blocks 13 and 14
         *      RR4: blocks 15 and 16
         * output:
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
         *      RR2: encrypted blocks 7 and 8
         *      RL3: encrypted blocks 9 and 10
         *      RR3: encrypted blocks 11 and 12
         *      RL4: encrypted blocks 13 and 14
         *      RR4: encrypted blocks 15 and 16
         */

        vmovdqa .Lbswap_mask(%rip), RKM;
        vmovd .Lfirst_mask(%rip), R1ST;
        vmovd .L32_mask(%rip), R32;
        enc_preload_rkr();

        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(RL2, RR2, RTMP, RX, RKM);
        inpack_blocks(RL3, RR3, RTMP, RX, RKM);
        inpack_blocks(RL4, RR4, RTMP, RX, RKM);

        round(RL, RR, 10, 2);
        round(RR, RL, 11, 3);

        /* ctx->rr != 0 selects the 12-round variant (key <= 80 bits):
         * skip the last four rounds. */
        movzbl rr(CTX), %eax;
        testl %eax, %eax;
        jnz .L__skip_enc;

        round(RL, RR, 12, 1);
        round(RR, RL, 13, 2);
        round(RL, RR, 14, 3);
        round(RR, RL, 15, 1);

.L__skip_enc:
        vmovdqa .Lbswap_mask(%rip), RKM;

        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

        RET;
SYM_FUNC_END(__cast5_enc_blk16)

SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
        /* input:
         *      %rdi: ctx
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
         *      RR2: encrypted blocks 7 and 8
         *      RL3: encrypted blocks 9 and 10
         *      RR3: encrypted blocks 11 and 12
         *      RL4: encrypted blocks 13 and 14
         *      RR4: encrypted blocks 15 and 16
         * output:
         *      RL1: decrypted blocks 1 and 2
         *      RR1: decrypted blocks 3 and 4
         *      RL2: decrypted blocks 5 and 6
         *      RR2: decrypted blocks 7 and 8
         *      RL3: decrypted blocks 9 and 10
         *      RR3: decrypted blocks 11 and 12
         *      RL4: decrypted blocks 13 and 14
         *      RR4: decrypted blocks 15 and 16
         */

        vmovdqa .Lbswap_mask(%rip), RKM;
        vmovd .Lfirst_mask(%rip), R1ST;
        vmovd .L32_mask(%rip), R32;
        dec_preload_rkr();

        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(RL2, RR2, RTMP, RX, RKM);
        inpack_blocks(RL3, RR3, RTMP, RX, RKM);
        inpack_blocks(RL4, RR4, RTMP, RX, RKM);

        /* ctx->rr != 0 selects the 12-round variant (key <= 80 bits):
         * skip the first four rounds (15..12). */
        movzbl rr(CTX), %eax;
        testl %eax, %eax;
        jnz .L__skip_dec;

        round(RL, RR, 15, 1);
        round(RR, RL, 14, 3);
        round(RL, RR, 13, 2);
        round(RR, RL, 12, 1);

.L__dec_tail:
        round(RL, RR, 11, 3);
        round(RR, RL, 10, 2);

        vmovdqa .Lbswap_mask(%rip), RKM;

        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

        RET;

.L__skip_dec:
        /* 12-round variant: drop the Kr bytes of the skipped rounds 15..12 */
        vpsrldq $4, RKR, RKR;
        jmp .L__dec_tail;
SYM_FUNC_END(__cast5_dec_blk16)

SYM_FUNC_START(cast5_ecb_enc_16way)
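        /*
         * cast5_ecb_enc_16way(ctx, dst, src): ECB-encrypt sixteen 64-bit
         * blocks.  Per the SysV ABI %rdi is ctx, %rsi is dst and %rdx is
         * src; dst is kept in %r11 across the call below.
         */
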
        vmovdqu (0*4*4)(%rdx), RL1;
        vmovdqu (1*4*4)(%rdx), RR1;
        vmovdqu (2*4*4)(%rdx), RL2;
        vmovdqu (3*4*4)(%rdx), RR2;
        vmovdqu (4*4*4)(%rdx), RL3;
        vmovdqu (5*4*4)(%rdx), RR3;
        vmovdqu (6*4*4)(%rdx), RL4;
        vmovdqu (7*4*4)(%rdx), RR4;

        call __cast5_enc_blk16;

        vmovdqu RR1, (0*4*4)(%r11);
        vmovdqu RL1, (1*4*4)(%r11);
        vmovdqu RR2, (2*4*4)(%r11);
        vmovdqu RL2, (3*4*4)(%r11);
        vmovdqu RR3, (4*4*4)(%r11);
        vmovdqu RL3, (5*4*4)(%r11);
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);
SYM_FUNC_END(cast5_ecb_enc_16way)

SYM_FUNC_START(cast5_ecb_dec_16way)
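        /*
         * cast5_ecb_dec_16way(ctx, dst, src): ECB-decrypt sixteen 64-bit
         * blocks; register usage matches cast5_ecb_enc_16way.
         */
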
        vmovdqu (0*4*4)(%rdx), RL1;
        vmovdqu (1*4*4)(%rdx), RR1;
        vmovdqu (2*4*4)(%rdx), RL2;
        vmovdqu (3*4*4)(%rdx), RR2;
        vmovdqu (4*4*4)(%rdx), RL3;
        vmovdqu (5*4*4)(%rdx), RR3;
        vmovdqu (6*4*4)(%rdx), RL4;
        vmovdqu (7*4*4)(%rdx), RR4;

        call __cast5_dec_blk16;

        vmovdqu RR1, (0*4*4)(%r11);
        vmovdqu RL1, (1*4*4)(%r11);
        vmovdqu RR2, (2*4*4)(%r11);
        vmovdqu RL2, (3*4*4)(%r11);
        vmovdqu RR3, (4*4*4)(%r11);
        vmovdqu RL3, (5*4*4)(%r11);
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);
SYM_FUNC_END(cast5_ecb_dec_16way)

SYM_FUNC_START(cast5_cbc_dec_16way)
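        /*
         * cast5_cbc_dec_16way(ctx, dst, src): CBC-decrypt sixteen 64-bit
         * blocks.  src is also held in %r12 so the preceding ciphertext
         * blocks can be xored in after decryption.
         */
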
        vmovdqu (0*16)(%rdx), RL1;
        vmovdqu (1*16)(%rdx), RR1;
        vmovdqu (2*16)(%rdx), RL2;
        vmovdqu (3*16)(%rdx), RR2;
        vmovdqu (4*16)(%rdx), RL3;
        vmovdqu (5*16)(%rdx), RR3;
        vmovdqu (6*16)(%rdx), RL4;
        vmovdqu (7*16)(%rdx), RR4;

        call __cast5_dec_blk16;

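        /*
         * CBC chaining: each decrypted block is xored with the preceding
         * ciphertext block, re-read from src at an 8-byte offset.  The
         * vpshufd below places ciphertext block 1 in the high lane only, so
         * plaintext block 1 keeps its raw decryption and the IV xor for it
         * is left to the caller.
         */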
        vmovq (%r12), RX;
        vpshufd $0x4f, RX, RX;
        vpxor RX, RR1, RR1;
        vpxor 0*16+8(%r12), RL1, RL1;
        vpxor 1*16+8(%r12), RR2, RR2;
        vpxor 2*16+8(%r12), RL2, RL2;
        vpxor 3*16+8(%r12), RR3, RR3;
        vpxor 4*16+8(%r12), RL3, RL3;
        vpxor 5*16+8(%r12), RR4, RR4;
        vpxor 6*16+8(%r12), RL4, RL4;

        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
        vmovdqu RL2, (3*16)(%r11);
        vmovdqu RR3, (4*16)(%r11);
        vmovdqu RL3, (5*16)(%r11);
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);
SYM_FUNC_END(cast5_cbc_dec_16way)

SYM_FUNC_START(cast5_ctr_16way)
        /* input:
         *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (big endian, 64bit)
         */

        vpcmpeqd RTMP, RTMP, RTMP;
        vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

        vpcmpeqd RKR, RKR, RKR;
        vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
        vmovdqa .Lbswap_iv_mask(%rip), R1ST;
        vmovdqa .Lbswap128_mask(%rip), RKM;

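        /*
         * Counter construction: the 64-bit big-endian IV is loaded and
         * byte-swapped into both halves of RX, then vpsubq with RTMP (-1 in
         * the low lane only) and RKR (-2 in both lanes) increments the two
         * little-endian counters by 1 and 2 respectively.  Each vpshufb
         * with .Lbswap128_mask turns a counter pair back into two
         * big-endian blocks.  At the end RX is advanced once more and
         * byte-swapped so its low quadword holds the next IV (IV16) in
         * big-endian form, ready to be written back to (%rcx).
         */
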
        /* load IV and byteswap */
        vmovq (%rcx), RX;
        vpshufb R1ST, RX, RX;

        /* construct IVs */
        vpsubq RTMP, RX, RX; /* le: IV1, IV0 */
        vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

        vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
        vpshufb R1ST, RX, RX; /* be: IV16, IV16 */

        /* store last IV */
        vmovq RX, (%rcx);

        call __cast5_enc_blk16;

        /* xor the encrypted counters (keystream) with src (%r12) */
        vpxor (0*16)(%r12), RR1, RR1;
        vpxor (1*16)(%r12), RL1, RL1;
        vpxor (2*16)(%r12), RR2, RR2;
        vpxor (3*16)(%r12), RL2, RL2;
        vpxor (4*16)(%r12), RR3, RR3;
        vpxor (5*16)(%r12), RL3, RL3;
        vpxor (6*16)(%r12), RR4, RR4;
        vpxor (7*16)(%r12), RL4, RL4;
        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
        vmovdqu RL2, (3*16)(%r11);
        vmovdqu RR3, (4*16)(%r11);
        vmovdqu RL3, (5*16)(%r11);
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);
SYM_FUNC_END(cast5_ctr_16way)