/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 */
#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"
/* structure of crypto context */
#define km  0
#define kr  (16*4)
#define rr  ((16*4)+16)
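
/*
 * A minimal sketch of the C-side context these offsets assume (field names
 * are illustrative; the layout is implied by the offsets above): sixteen
 * 32-bit masking subkeys, sixteen 8-bit rotation subkeys, and a flag that is
 * non-zero for short keys using the reduced 12-round variant of RFC 2144:
 *
 *      struct cast5_ctx {
 *              u32 Km[16];     // masking keys   -> km == 0
 *              u8  Kr[16];     // rotation keys  -> kr == 16*4
 *              int rr;         // reduced rounds -> rr == (16*4)+16
 *      };
 */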

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl  src ## bh,       RID1d;    \
        movzbl  src ## bl,       RID2d;    \
        shrq $16, src;                     \
        movl    s1(, RID1, 4),   dst ## d; \
        op1     s2(, RID2, 4),   dst ## d; \
        movzbl  src ## bh,       RID1d;    \
        movzbl  src ## bl,       RID2d;    \
        interleave_op(il_reg);             \
        op2     s3(, RID1, 4),   dst ## d; \
        op3     s4(, RID2, 4),   dst ## d;
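
/*
 * lookup_32bit does the four scalar s-box lookups of the CAST-128 f function
 * for one 32-bit value held in a 64-bit general-purpose register: it indexes
 * s1/s2 with the bh/bl byte pair, shifts the register right by 16, indexes
 * s3/s4 with the next byte pair, and folds the table values together with the
 * op1/op2/op3 operations selected by the round type.  interleave_op lets the
 * caller schedule the shift that exposes the upper 32-bit half (for the next
 * lookup on the same register) in between the table loads.
 */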

#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16,       reg;

#define F_head(a, x, gi1, gi2, op0) \
        vpslld  RKRF,   x, RTMP; \

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
        lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
        lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
        \
        lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
        lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
        \
        vpinsrq $1,     RFS3, x, x;
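
/*
 * Together F_head and F_tail implement the round function on four 32-bit
 * right halves at a time: F_head combines the data with the masking key RKM
 * (op0) and rotates each lane left by the rotation key (RKRF/RKRR hold the
 * left/right shift counts), then moves the lanes out to general-purpose
 * registers; F_tail runs the scalar s-box lookups on them and packs the four
 * 32-bit results back into an xmm register.
 */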

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
        F_head(b1, RX, RGI1, RGI2, op0); \
        F_head(b2, RX, RGI3, RGI4, op0); \
        \
        F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
        F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \

#define F1_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
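
/*
 * These select the three CAST-128 round function types (RFC 2144); op0 is the
 * vector combine with the masking key and op1..op3 are the scalar s-box
 * combining steps:
 *
 *      f1: I = ((Km + D) <<< Kr);  f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 *      f2: I = ((Km ^ D) <<< Kr);  f = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]
 *      f3: I = ((Km - D) <<< Kr);  f = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]
 */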

#define subround(a1, b1, a2, b2, f) \
        F ## f ## _2(a1, b1, a2, b2);

#define round(l, r, n, f) \
        vbroadcastss    (km+(4*n))(CTX), RKM; \
        vpand           R1ST, RKR, RKRF; \
        vpsubq          RKRF, R32, RKRR; \
        vpsrldq $1,     RKR, RKR; \
        subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
        subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
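
/*
 * One batched round: broadcast the masking key Km[n] into all lanes of RKM,
 * isolate the current rotation count from the low byte of RKR (RKRF) and the
 * complementary count RKRR used for the shift-based rotate in F_head, then
 * drop that byte from RKR so the next round sees the next rotation key.  The
 * two subround invocations together cover all 16 blocks, four 32-bit halves
 * per xmm register.
 */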

#define enc_preload_rkr() \
        vbroadcastss    .L16_mask, RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor           kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
        vbroadcastss    .L16_mask, RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor           kr(CTX), RKR, RKR; \
        vpshufb         .Lbswap128_mask, RKR, RKR;
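
/*
 * XOR-ing each rotation key with 16 adds 16 mod 32.  The extra half-word of
 * rotation compensates for lookup_32bit reading the low byte pair before the
 * high one, so that s1/s2 are still indexed by the two most significant bytes
 * of the 32-bit intermediate value.  For decryption the rotation keys are
 * additionally byte-reversed so that the per-round "vpsrldq $1, RKR, RKR"
 * hands them out starting from round 15.
 */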

#define transpose_2x4(x0, x1, t0, t1) \
        vpunpckldq      x1, x0, t0; \
        vpunpckhdq      x1, x0, t1; \
        \
        vpunpcklqdq     t1, t0, x0; \
        vpunpckhqdq     t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
        vpshufb rmask,  x0, x0; \
        vpshufb rmask,  x1, x1; \
        \
        transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
        transpose_2x4(x0, x1, t0, t1) \
        \
        vpshufb rmask,  x0, x0; \
        vpshufb rmask,  x1, x1;
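
/*
 * inpack_blocks byte-swaps each 32-bit word from big-endian block order into
 * the little-endian order the arithmetic expects, then transpose_2x4 regroups
 * the dwords so that one register of the pair holds the left halves of four
 * blocks and the other the right halves.  outunpack_blocks applies the same
 * two involutions in reverse order after the last round.
 */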

.section        .rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

.section        .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section        .rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
.align 16
.Lbswap_iv_mask:
        .byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0

.section        .rodata.cst4.16_mask, "aM", @progbits, 4

.section        .rodata.cst4.32_mask, "aM", @progbits, 4

.section        .rodata.cst4.first_mask, "aM", @progbits, 4

.text

SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
        /* input:
         *      RL1: blocks 1 and 2
         *      RR1: blocks 3 and 4
         *      RL2: blocks 5 and 6
         *      RR2: blocks 7 and 8
         *      RL3: blocks 9 and 10
         *      RR3: blocks 11 and 12
         *      RL4: blocks 13 and 14
         *      RR4: blocks 15 and 16
         * output:
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
         *      RR2: encrypted blocks 7 and 8
         *      RL3: encrypted blocks 9 and 10
         *      RR3: encrypted blocks 11 and 12
         *      RL4: encrypted blocks 13 and 14
         *      RR4: encrypted blocks 15 and 16
         */

        vmovdqa .Lbswap_mask, RKM;
        vmovd   .Lfirst_mask, R1ST;
        vmovd   .L32_mask, R32;
        enc_preload_rkr();

        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(RL2, RR2, RTMP, RX, RKM);
        inpack_blocks(RL3, RR3, RTMP, RX, RKM);
        inpack_blocks(RL4, RR4, RTMP, RX, RKM);

        round(RL, RR, 10, 2);
        round(RR, RL, 11, 3);

        movzbl  rr(CTX), %eax;
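        /*
         * rr(CTX) is non-zero for short (<= 80-bit) keys, which use only 12
         * rounds (RFC 2144); in that case rounds 12-15 below are skipped.
         */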
        round(RL, RR, 12, 1);
        round(RR, RL, 13, 2);
        round(RL, RR, 14, 3);
        round(RR, RL, 15, 1);

        vmovdqa .Lbswap_mask, RKM;

        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
SYM_FUNC_END(__cast5_enc_blk16)

SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
        /* input:
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
         *      RR2: encrypted blocks 7 and 8
         *      RL3: encrypted blocks 9 and 10
         *      RR3: encrypted blocks 11 and 12
         *      RL4: encrypted blocks 13 and 14
         *      RR4: encrypted blocks 15 and 16
         * output:
         *      RL1: decrypted blocks 1 and 2
         *      RR1: decrypted blocks 3 and 4
         *      RL2: decrypted blocks 5 and 6
         *      RR2: decrypted blocks 7 and 8
         *      RL3: decrypted blocks 9 and 10
         *      RR3: decrypted blocks 11 and 12
         *      RL4: decrypted blocks 13 and 14
         *      RR4: decrypted blocks 15 and 16
         */

        vmovdqa .Lbswap_mask, RKM;
        vmovd   .Lfirst_mask, R1ST;
        vmovd   .L32_mask, R32;
        dec_preload_rkr();

        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(RL2, RR2, RTMP, RX, RKM);
        inpack_blocks(RL3, RR3, RTMP, RX, RKM);
        inpack_blocks(RL4, RR4, RTMP, RX, RKM);

        movzbl  rr(CTX), %eax;

        round(RL, RR, 15, 1);
        round(RR, RL, 14, 3);
        round(RL, RR, 13, 2);
        round(RR, RL, 12, 1);

        round(RL, RR, 11, 3);
        round(RR, RL, 10, 2);

        vmovdqa .Lbswap_mask, RKM;

        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

        /* reduced-rounds key: skip the rotation keys of the four skipped rounds */
        vpsrldq $4, RKR, RKR;
SYM_FUNC_END(__cast5_dec_blk16)

SYM_FUNC_START(cast5_ecb_enc_16way)

        vmovdqu (0*4*4)(%rdx), RL1;
        vmovdqu (1*4*4)(%rdx), RR1;
        vmovdqu (2*4*4)(%rdx), RL2;
        vmovdqu (3*4*4)(%rdx), RR2;
        vmovdqu (4*4*4)(%rdx), RL3;
        vmovdqu (5*4*4)(%rdx), RR3;
        vmovdqu (6*4*4)(%rdx), RL4;
        vmovdqu (7*4*4)(%rdx), RR4;

        call __cast5_enc_blk16;

        vmovdqu RR1, (0*4*4)(%r11);
        vmovdqu RL1, (1*4*4)(%r11);
        vmovdqu RR2, (2*4*4)(%r11);
        vmovdqu RL2, (3*4*4)(%r11);
        vmovdqu RR3, (4*4*4)(%r11);
        vmovdqu RL3, (5*4*4)(%r11);
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);
SYM_FUNC_END(cast5_ecb_enc_16way)

SYM_FUNC_START(cast5_ecb_dec_16way)

        vmovdqu (0*4*4)(%rdx), RL1;
        vmovdqu (1*4*4)(%rdx), RR1;
        vmovdqu (2*4*4)(%rdx), RL2;
        vmovdqu (3*4*4)(%rdx), RR2;
        vmovdqu (4*4*4)(%rdx), RL3;
        vmovdqu (5*4*4)(%rdx), RR3;
        vmovdqu (6*4*4)(%rdx), RL4;
        vmovdqu (7*4*4)(%rdx), RR4;

        call __cast5_dec_blk16;

        vmovdqu RR1, (0*4*4)(%r11);
        vmovdqu RL1, (1*4*4)(%r11);
        vmovdqu RR2, (2*4*4)(%r11);
        vmovdqu RL2, (3*4*4)(%r11);
        vmovdqu RR3, (4*4*4)(%r11);
        vmovdqu RL3, (5*4*4)(%r11);
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);
SYM_FUNC_END(cast5_ecb_dec_16way)

SYM_FUNC_START(cast5_cbc_dec_16way)

        vmovdqu (0*16)(%rdx), RL1;
        vmovdqu (1*16)(%rdx), RR1;
        vmovdqu (2*16)(%rdx), RL2;
        vmovdqu (3*16)(%rdx), RR2;
        vmovdqu (4*16)(%rdx), RL3;
        vmovdqu (5*16)(%rdx), RR3;
        vmovdqu (6*16)(%rdx), RL4;
        vmovdqu (7*16)(%rdx), RR4;

        call __cast5_dec_blk16;
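
        /*
         * CBC chaining: each decrypted block is xor-ed with the preceding
         * ciphertext block, i.e. with the source buffer shifted by one 8-byte
         * block (the "+8" offsets below); the very first block is not chained
         * here, its IV xor is left to the caller.
         */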
        vpshufd $0x4f, RX, RX;
        vpxor 0*16+8(%r12), RL1, RL1;
        vpxor 1*16+8(%r12), RR2, RR2;
        vpxor 2*16+8(%r12), RL2, RL2;
        vpxor 3*16+8(%r12), RR3, RR3;
        vpxor 4*16+8(%r12), RL3, RL3;
        vpxor 5*16+8(%r12), RR4, RR4;
        vpxor 6*16+8(%r12), RL4, RL4;

        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
        vmovdqu RL2, (3*16)(%r11);
        vmovdqu RR3, (4*16)(%r11);
        vmovdqu RL3, (5*16)(%r11);
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);
SYM_FUNC_END(cast5_cbc_dec_16way)

SYM_FUNC_START(cast5_ctr_16way)
        /*
         *      %rcx: iv (big endian, 64bit)
         */

        vpcmpeqd RTMP, RTMP, RTMP;
        vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

        vpcmpeqd RKR, RKR, RKR;
        vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
        vmovdqa .Lbswap_iv_mask, R1ST;
        vmovdqa .Lbswap128_mask, RKM;

        /* load IV and byteswap */
        vmovq (%rcx), RX;
        vpshufb R1ST, RX, RX;
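
        /*
         * Construct the 16 counter values: the counter is kept little-endian
         * in both qword lanes and advanced by subtracting the negative
         * constants prepared above (RTMP bumps the low lane by 1, RKR bumps
         * both lanes by 2); each pair is byte-swapped back to big endian
         * before being fed to the cipher.
         */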
        vpsubq RTMP, RX, RX; /* le: IV1, IV0 */
        vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

        /* store last IV */
        vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
        vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
        vmovq RX, (%rcx);

        call __cast5_enc_blk16;
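
        /* CTR: xor the encrypted counters with the source blocks at (%r12)
         * and write the result to the destination at (%r11). */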
        vpxor (0*16)(%r12), RR1, RR1;
        vpxor (1*16)(%r12), RL1, RL1;
        vpxor (2*16)(%r12), RR2, RR2;
        vpxor (3*16)(%r12), RL2, RL2;
        vpxor (4*16)(%r12), RR3, RR3;
        vpxor (5*16)(%r12), RL3, RL3;
        vpxor (6*16)(%r12), RR4, RR4;
        vpxor (7*16)(%r12), RL4, RL4;
        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
        vmovdqu RL2, (3*16)(%r11);
        vmovdqu RR3, (4*16)(%r11);
        vmovdqu RL3, (5*16)(%r11);
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);
SYM_FUNC_END(cast5_ctr_16way)