# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
#
# Copyright (C) 2017-2018 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
# Copyright (C) 2017-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
# Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved.
#
# This code is taken from the OpenSSL project but the author, Andy Polyakov,
# has relicensed it under the licenses specified in the SPDX header above.
# The original headers, including the original license headers, are
# included below for completeness.
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# This module implements Poly1305 hash for x86_64.
#
# Add AVX512F+VL+BW code path.
#
# Convert AVX512F+VL+BW code path to pure AVX512F, so that it can be
# executed even on Knights Landing. The trigger for the modification was
# the observation that AVX512 code paths can negatively affect overall
# Skylake-X system performance. Since we are likely to suppress the
# AVX512F capability flag [at least on Skylake-X], the conversion serves
# as a kind of "investment protection". Note that the next *lake
# processor, Cannonlake, has an AVX512IFMA code path to execute...
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#
#		IALU/gcc-4.8(*)	AVX(**)		AVX2	AVX-512
# Westmere	1.88/+120%	-
# Sandy Bridge	1.39/+140%	1.10
# Haswell	1.14/+175%	1.11		0.65
# Skylake[-X]	1.13/+120%	0.96		0.51	[0.35]
# Silvermont	2.83/+95%	-
# Knights L	3.60/?		1.65		1.10	0.41(***)
# Goldmont	1.70/+180%	-
# VIA Nano	1.82/+150%	-
# Sledgehammer	1.38/+160%	-
# Bulldozer	2.30/+130%	0.97
# Ryzen		1.15/+200%	1.08		1.18
#
# (*)	improvement coefficients relative to clang are more modest and
#	are ~50% on most processors, in both cases we are comparing to
#	__int128 code;
# (**)	an SSE2 implementation was attempted, but among non-AVX processors
#	it was faster than the integer-only code only on older Intel P4 and
#	Core processors, by 30-50% (less so the newer the processor), while
#	being slower on contemporary ones, for example almost 2x slower on
#	Atom; as the former are naturally disappearing, SSE2 is deemed
#	unnecessary;
# (***)	strangely enough, performance seems to vary from core to core;
#	the listed result is the best case;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$kernel=0; $kernel=1 if (!$flavour && !$output);

if (!$kernel) {
	$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
	( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
	( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
	die "can't locate x86_64-xlate.pl";

	open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";

	if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	    =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
		$avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
	}

	if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
		$avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
		$avx += 1 if ($1==2.11 && $2>=8);
	}

	if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
		$avx = ($1>=10) + ($1>=11);
	}

	if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
		$avx = ($2>=3.0) + ($2>3.0);
	}
} else {
	$avx = 4; # The kernel uses ifdefs for this.
}
sub declare_function() {
	my ($name, $align, $nargs) = @_;
	if($kernel) {
		$code .= ".align $align\n";
		$code .= "SYM_FUNC_START($name)\n";
		$code .= ".L$name:\n";
	} else {
		$code .= ".globl	$name\n";
		$code .= ".type	$name,\@function,$nargs\n";
		$code .= ".align	$align\n";
	}
}

sub end_function() {
	my ($name) = @_;
	if($kernel) {
		$code .= "SYM_FUNC_END($name)\n";
	} else {
		$code .= ".size	$name,.-$name\n";
	}
}
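
# A minimal usage sketch (illustrative only; "poly1305_example" below is a
# hypothetical name and nothing here is emitted): declare_function() and
# end_function() bracket every exported routine, so one code base emits
# either kernel-style SYM_FUNC_START/SYM_FUNC_END annotations or the
# classic .globl/.type/.size directives:
#
#	&declare_function("poly1305_example", 32, 2);
#	$code .= "	ret\n";
#	&end_function("poly1305_example");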
$code.=<<___ if $kernel;
#include <linux/linkage.h>
___

$code.=<<___ if $kernel;
.long	0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
.long	`1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
.long	0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
.long	2,2,2,3,2,0,2,1
.long	0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7
.long	0,1,1,2,2,3,7,7
.quad	0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff
.quad	0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
.quad	0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
.quad	0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
.quad	0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff

$code.=<<___ if (!$kernel);
.asciz	"Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);	# *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=("%r8","%r9","%rdi","%r11","%r12","%r13");
my ($h0,$h1,$h2)=("%r14","%rbx","%r10");

sub poly1305_iteration {
# input:	copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:	$h0-$h2 *= $r0-$r1
	mov	%rax,$h0		# future $h0
	mov	$h2,$h1			# borrow $h1
	imulq	$s1,$h1			# h2*s1
	imulq	$r0,$h2			# h2*r0
	mov	\$-4,%rax		# mask value
	and	$d3,%rax		# last reduction step
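
# Exposition (a hedged sketch, not generated code): the "last reduction
# step" exploits 2^130 = 5 (mod 2^130-5). After the 64x64 multiplies, $d3
# holds bits 128 and up, so the overflow beyond bit 130 is c = $d3>>2 and
#
#	h += c + 4*c		# i.e. h += 5*(d >> 130)
#
# where 4*c is simply $d3 & ~3 -- exactly what "mov \$-4,%rax" followed
# by "and $d3,%rax" computes, with no shift on the critical path.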
########################################################################
# Layout of opaque area is following.
#
#	unsigned __int64 h[3];		# current hash value base 2^64
#	unsigned __int64 r[2];		# key value base 2^64

$code.=<<___ if (!$kernel);
.extern	OPENSSL_ia32cap_P

.globl	poly1305_init_x86_64
.hidden	poly1305_init_x86_64
.globl	poly1305_blocks_x86_64
.hidden	poly1305_blocks_x86_64
.globl	poly1305_emit_x86_64
.hidden	poly1305_emit_x86_64
&declare_function("poly1305_init_x86_64", 32, 3);
	mov	%rax,0($ctx)		# initialize hash value

$code.=<<___ if (!$kernel);
	lea	poly1305_blocks_x86_64(%rip),%r10
	lea	poly1305_emit_x86_64(%rip),%r11

$code.=<<___ if (!$kernel && $avx);
	mov	OPENSSL_ia32cap_P+4(%rip),%r9
	lea	poly1305_blocks_avx(%rip),%rax
	lea	poly1305_emit_avx(%rip),%rcx
	bt	\$`60-32`,%r9		# AVX?

$code.=<<___ if (!$kernel && $avx>1);
	lea	poly1305_blocks_avx2(%rip),%rax
	bt	\$`5+32`,%r9		# AVX2?

$code.=<<___ if (!$kernel && $avx>3);
	mov	\$`(1<<31|1<<21|1<<16)`,%rax

	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx

$code.=<<___ if (!$kernel && $flavour !~ /elf32/);

$code.=<<___ if (!$kernel && $flavour =~ /elf32/);

&end_function("poly1305_init_x86_64");

&declare_function("poly1305_blocks_x86_64", 32, 4);
	jz	.Lno_data		# too short
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
&poly1305_iteration();
	mov	$h0,0($ctx)		# store hash value
.cfi_adjust_cfa_offset	-48
&end_function("poly1305_blocks_x86_64");

&declare_function("poly1305_emit_x86_64", 32, 3);
	mov	0($ctx),%r8	# load hash value
	add	\$5,%r8		# compare to modulus
	shr	\$2,%r10	# did 130-bit value overflow?
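
# Exposition (hedged, not generated code): the final reduction is
# branch-free. h+5 is computed speculatively; if h >= 2^130-5 the
# addition carries into bit 130, the "shr \$2" of the top limb leaves a
# non-zero value, and conditional moves then select h+5 (which equals
# h - p modulo 2^130) instead of plain h.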
	add	0($nonce),%rax	# accumulate nonce
	mov	%rax,0($mac)	# write result
&end_function("poly1305_emit_x86_64");
$code .= "#ifdef CONFIG_AS_AVX\n";

########################################################################
# Layout of opaque area is following.
#
#	unsigned __int32 h[5];		# current hash value base 2^26
#	unsigned __int32 is_base2_26;
#	unsigned __int64 r[2];		# key value base 2^64
#	unsigned __int64 pad;
#	struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are base 2^26 digits of degrees of multiplier key. There are
# 5 digits, but last four are interleaved with multiples of 5, totalling
# in 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
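#
# Hedged aside (exposition only): the 5*r_i entries exist because in
# base 2^26 a product digit d_k = sum of h_i*r_j over i+j = k picks up
# the factor 2^130 = 5 (mod 2^130-5) whenever i+j >= 5, e.g.
#
#	d0 = h0*r0 + 5*(h1*r4 + h2*r3 + h3*r2 + h4*r1)
#
# Precomputing s_j = 5*r_j turns every such term into a single
# vpmuludq, keeping extra multiplications out of the inner loop.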
my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    map("%xmm$_",(0..15));

.type	__poly1305_block,\@abi-omnipotent
&poly1305_iteration();
.size	__poly1305_block,.-__poly1305_block

.type	__poly1305_init_avx,\@abi-omnipotent
	lea	48+64($ctx),$ctx	# size optimization
	call	__poly1305_block	# r^2

	mov	\$0x3ffffff,%eax	# save interleaved r^2 and r base 2^26
	mov	%eax,`16*0+0-64`($ctx)
	mov	%edx,`16*0+4-64`($ctx)
	mov	%eax,`16*1+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*1+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*2+0-64`($ctx)
	mov	%edx,`16*2+4-64`($ctx)
	mov	%eax,`16*3+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*3+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*4+0-64`($ctx)
	mov	%edx,`16*4+4-64`($ctx)
	mov	%eax,`16*5+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*5+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*6+0-64`($ctx)
	mov	%edx,`16*6+4-64`($ctx)
	mov	$d1#d,`16*7+0-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d2#d,`16*7+4-64`($ctx)
	lea	($d2,$d2,4),$d2		# *5
	mov	$d1#d,`16*8+0-64`($ctx)
	mov	$d2#d,`16*8+4-64`($ctx)

	call	__poly1305_block	# r^3

	mov	\$0x3ffffff,%eax	# save r^3 base 2^26
	mov	%eax,`16*0+12-64`($ctx)
	mov	%edx,`16*1+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*2+12-64`($ctx)
	mov	%eax,`16*3+12-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%eax,`16*4+12-64`($ctx)
	mov	%edx,`16*5+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*6+12-64`($ctx)
	mov	$d1#d,`16*7+12-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+12-64`($ctx)

	call	__poly1305_block	# r^4

	mov	\$0x3ffffff,%eax	# save r^4 base 2^26
	mov	%eax,`16*0+8-64`($ctx)
	mov	%edx,`16*1+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*2+8-64`($ctx)
	mov	%eax,`16*3+8-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%eax,`16*4+8-64`($ctx)
	mov	%edx,`16*5+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*6+8-64`($ctx)
	mov	$d1#d,`16*7+8-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+8-64`($ctx)

	lea	-48-64($ctx),$ctx	# size [de-]optimization
.size	__poly1305_init_avx,.-__poly1305_init_avx

&declare_function("poly1305_blocks_avx", 32, 4);
	mov	20($ctx),%r8d		# is_base2_26
	mov	$len,%r15		# reassign $len
	mov	0($ctx),$d1		# load hash value
	mov	24($ctx),$r0		# load r

	################################# base 2^26 -> base 2^64
	and	\$`-1*(1<<31)`,$d1
	mov	$d2,$r1			# borrow $r1
	and	\$`-1*(1<<31)`,$d2
	adc	\$0,$h2			# can be partially reduced...
	mov	\$-4,$d2		# ... so reduce
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	jz	.Lstore_base2_26_avx

.Lstore_base2_64_avx:
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed

.Lstore_base2_26_avx:
	mov	%rax#d,0($ctx)		# store hash value base 2^26

.Lblocks_avx_epilogue:
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	movl	\$1,20($ctx)		# set is_base2_26
	call	__poly1305_init_avx

.Lbase2_64_avx_epilogue:
	vmovd	4*0($ctx),$H0		# load hash value

$code.=<<___ if (!$win64);
.cfi_def_cfa_register	%r10

$code.=<<___ if ($win64);
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)

	vmovdqu	`16*3`($ctx),$D4	# preload r0^2
	lea	`16*3+64`($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	################################################################
	vmovdqu	16*2($inp),$T0
	vmovdqu	16*3($inp),$T1
	vmovdqa	64(%rcx),$MASK		# .Lmask26

	vpsrldq	\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	vpsrlq	\$40,$T4,$T4		# 4
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	# expand and copy pre-calculated table to stack
	vmovdqu	`16*1-64`($ctx),$D1
	vmovdqu	`16*2-64`($ctx),$D2
	vpshufd	\$0xEE,$D4,$D3		# 34xx -> 3434
	vpshufd	\$0x44,$D4,$D0		# xx12 -> 1212
	vmovdqa	$D3,-0x90(%r11)
	vmovdqa	$D0,0x00(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vmovdqu	`16*3-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x80(%r11)
	vmovdqa	$D1,0x10(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqu	`16*4-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x70(%r11)
	vmovdqa	$D2,0x20(%rsp)
	vpshufd	\$0xEE,$D0,$D4
	vmovdqu	`16*5-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D4,-0x60(%r11)
	vmovdqa	$D0,0x30(%rsp)
	vpshufd	\$0xEE,$D1,$D3
	vmovdqu	`16*6-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D3,-0x50(%r11)
	vmovdqa	$D1,0x40(%rsp)
	vpshufd	\$0xEE,$D2,$D4
	vmovdqu	`16*7-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D4,-0x40(%r11)
	vmovdqa	$D2,0x50(%rsp)
	vpshufd	\$0xEE,$D0,$D3
	vmovdqu	`16*8-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D3,-0x30(%r11)
	vmovdqa	$D0,0x60(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x20(%r11)
	vmovdqa	$D1,0x70(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x10(%r11)
	vmovdqa	$D2,0x80(%rsp)

	################################################################
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	#   \___________________/
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	#   \___________________/ \____________________/
	#
	# Note that we start with inp[2:3]*r^2. This is because it
	# doesn't depend on reduction in previous iteration.
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# though note that $Tx and $Hx are "reversed" in this section,
	# and $D4 is preloaded with r0^2...
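	#
	# Hedged aside (exposition only): this is two-lane Horner
	# evaluation. For four blocks m1..m4 the lanes compute
	#
	#	(m1*r^2 + m3)*r^2 + (m2*r^2 + m4)*r
	#	  = m1*r^4 + m2*r^3 + m3*r^2 + m4*r
	#
	# i.e. blocks alternate between the two lanes (each lane scaled
	# by r^2 per iteration), and the final multiplication by r^2:r^1
	# folds the lanes back into the scalar Horner result.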
	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vmovdqa	$H2,0x20(%r11)		# offload hash
	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vmovdqa	0x10(%rsp),$H2		# r1^2
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vmovdqa	$H0,0x00(%r11)		#
	vpmuludq	0x20(%rsp),$T4,$H0	# h4*s1
	vmovdqa	$H1,0x10(%r11)		#
	vpmuludq	$T3,$H2,$H1	# h3*r1
	vpaddq	$H0,$D0,$D0		# d0 += h4*s1
	vpaddq	$H1,$D4,$D4		# d4 += h3*r1
	vmovdqa	$H3,0x30(%r11)		#
	vpmuludq	$T2,$H2,$H0	# h2*r1
	vpmuludq	$T1,$H2,$H1	# h1*r1
	vpaddq	$H0,$D3,$D3		# d3 += h2*r1
	vmovdqa	0x30(%rsp),$H3		# r2^2
	vpaddq	$H1,$D2,$D2		# d2 += h1*r1
	vmovdqa	$H4,0x40(%r11)		#
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpmuludq	$T2,$H3,$H0	# h2*r2
	vpaddq	$H2,$D1,$D1		# d1 += h0*r1

	vmovdqa	0x40(%rsp),$H4		# s2^2
	vpaddq	$H0,$D4,$D4		# d4 += h2*r2
	vpmuludq	$T1,$H3,$H1	# h1*r2
	vpmuludq	$T0,$H3,$H3	# h0*r2
	vpaddq	$H1,$D3,$D3		# d3 += h1*r2
	vmovdqa	0x50(%rsp),$H2		# r3^2
	vpaddq	$H3,$D2,$D2		# d2 += h0*r2
	vpmuludq	$T4,$H4,$H0	# h4*s2
	vpmuludq	$T3,$H4,$H4	# h3*s2
	vpaddq	$H0,$D1,$D1		# d1 += h4*s2
	vmovdqa	0x60(%rsp),$H3		# s3^2
	vpaddq	$H4,$D0,$D0		# d0 += h3*s2

	vmovdqa	0x80(%rsp),$H4		# s4^2
	vpmuludq	$T1,$H2,$H1	# h1*r3
	vpmuludq	$T0,$H2,$H2	# h0*r3
	vpaddq	$H1,$D4,$D4		# d4 += h1*r3
	vpaddq	$H2,$D3,$D3		# d3 += h0*r3
	vpmuludq	$T4,$H3,$H0	# h4*s3
	vpmuludq	$T3,$H3,$H1	# h3*s3
	vpaddq	$H0,$D2,$D2		# d2 += h4*s3
	vmovdqu	16*0($inp),$H0		# load input
	vpaddq	$H1,$D1,$D1		# d1 += h3*s3
	vpmuludq	$T2,$H3,$H3	# h2*s3
	vpmuludq	$T2,$H4,$T2	# h2*s4
	vpaddq	$H3,$D0,$D0		# d0 += h2*s3

	vmovdqu	16*1($inp),$H1		#
	vpaddq	$T2,$D1,$D1		# d1 += h2*s4
	vpmuludq	$T3,$H4,$T3	# h3*s4
	vpmuludq	$T4,$H4,$T4	# h4*s4
	vpsrldq	\$6,$H0,$H2		# splat input
	vpaddq	$T3,$D2,$D2		# d2 += h3*s4
	vpaddq	$T4,$D3,$D3		# d3 += h4*s4
	vpsrldq	\$6,$H1,$H3		#
	vpmuludq	0x70(%rsp),$T0,$T4	# h0*r4
	vpmuludq	$T1,$H4,$T0	# h1*s4
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpaddq	$T4,$D4,$D4		# d4 += h0*r4
	vmovdqa	-0x90(%r11),$T4		# r0^4
	vpaddq	$T0,$D0,$D0		# d0 += h1*s4

	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	#vpsrlq	\$40,$H4,$H4		# 4
	vpsrldq	\$`40/8`,$H4,$H4	# 4
	vpand	$MASK,$H0,$H0		# 0
	vpand	$MASK,$H1,$H1		# 1
	vpand	0(%rcx),$H4,$H4		# .Lmask24
	vpand	$MASK,$H2,$H2		# 2
	vpand	$MASK,$H3,$H3		# 3
	vpor	32(%rcx),$H4,$H4	# padbit, yes, always

	vpaddq	0x00(%r11),$H0,$H0	# add hash value
	vpaddq	0x10(%r11),$H1,$H1
	vpaddq	0x20(%r11),$H2,$H2
	vpaddq	0x30(%r11),$H3,$H3
	vpaddq	0x40(%r11),$H4,$H4

	################################################################
	# Now we accumulate (inp[0:1]+hash)*r^4
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vmovdqa	-0x80(%r11),$T2		# r1^4
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpmuludq	-0x70(%r11),$H4,$T0	# h4*s1
	vpaddq	$T0,$D0,$D0		# d0 += h4*s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq	$T1,$D3,$D3		# d3 += h2*r1
	vmovdqa	-0x60(%r11),$T3		# r2^4
	vpaddq	$T0,$D4,$D4		# d4 += h3*r1
	vpmuludq	$H1,$T2,$T1	# h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq	$T1,$D2,$D2		# d2 += h1*r1
	vpaddq	$T2,$D1,$D1		# d1 += h0*r1

	vmovdqa	-0x50(%r11),$T4		# s2^4
	vpmuludq	$H2,$T3,$T0	# h2*r2
	vpmuludq	$H1,$T3,$T1	# h1*r2
	vpaddq	$T0,$D4,$D4		# d4 += h2*r2
	vpaddq	$T1,$D3,$D3		# d3 += h1*r2
	vmovdqa	-0x40(%r11),$T2		# r3^4
	vpmuludq	$H0,$T3,$T3	# h0*r2
	vpmuludq	$H4,$T4,$T0	# h4*s2
	vpaddq	$T3,$D2,$D2		# d2 += h0*r2
	vpaddq	$T0,$D1,$D1		# d1 += h4*s2
	vmovdqa	-0x30(%r11),$T3		# s3^4
	vpmuludq	$H3,$T4,$T4	# h3*s2
	vpmuludq	$H1,$T2,$T1	# h1*r3
	vpaddq	$T4,$D0,$D0		# d0 += h3*s2

	vmovdqa	-0x10(%r11),$T4		# s4^4
	vpaddq	$T1,$D4,$D4		# d4 += h1*r3
	vpmuludq	$H0,$T2,$T2	# h0*r3
	vpmuludq	$H4,$T3,$T0	# h4*s3
	vpaddq	$T2,$D3,$D3		# d3 += h0*r3
	vpaddq	$T0,$D2,$D2		# d2 += h4*s3
	vmovdqu	16*2($inp),$T0		# load input
	vpmuludq	$H3,$T3,$T2	# h3*s3
	vpmuludq	$H2,$T3,$T3	# h2*s3
	vpaddq	$T2,$D1,$D1		# d1 += h3*s3
	vmovdqu	16*3($inp),$T1		#
	vpaddq	$T3,$D0,$D0		# d0 += h2*s3

	vpmuludq	$H2,$T4,$H2	# h2*s4
	vpmuludq	$H3,$T4,$H3	# h3*s4
	vpsrldq	\$6,$T0,$T2		# splat input
	vpaddq	$H2,$D1,$D1		# d1 += h2*s4
	vpmuludq	$H4,$T4,$H4	# h4*s4
	vpsrldq	\$6,$T1,$T3		#
	vpaddq	$H3,$D2,$H2		# h2 = d2 + h3*s4
	vpaddq	$H4,$D3,$H3		# h3 = d3 + h4*s4
	vpmuludq	-0x20(%r11),$H0,$H4	# h0*r4
	vpmuludq	$H1,$T4,$H0
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpaddq	$H4,$D4,$H4		# h4 = d4 + h0*r4
	vpaddq	$H0,$D0,$H0		# h0 = d0 + h1*s4

	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	#vpsrlq	\$40,$T4,$T4		# 4
	vpsrldq	\$`40/8`,$T4,$T4	# 4
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	0(%rcx),$T4,$T4		# .Lmask24
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	################################################################
	# lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
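	#
	# Hedged sketch of the lazy reduction (exposition only): digits
	# are not fully normalized after each multiplication; every
	# ~26-bit digit only sheds its carry into the next one, and the
	# carry out of d4 re-enters d0 multiplied by 5 (again, 2^130 = 5
	# mod p):
	#
	#	c = d_i >> 26;	d_i &= 0x3ffffff;	d_{i+1} += c
	#	c = d4  >> 26;	d4  &= 0x3ffffff;	d0 += c + 4*c
	#
	# One such interleaved pass keeps all digits comfortably below
	# 2^27, which is all the next round of 26x26->52-bit vpmuludq
	# accumulation needs.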
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpaddq	$D0,$D1,$H1		# h0 -> h1
	vpaddq	$D1,$H2,$H2		# h1 -> h2
	vpaddq	$D0,$H0,$H0		# h4 -> h0
	vpaddq	$D2,$H3,$H3		# h2 -> h3
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	################################################################
	# multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	vpshufd	\$0x10,$D4,$D4		# r0^n, xx12 -> x1x2
	vmovdqa	$H2,0x20(%r11)
	vmovdqa	$H0,0x00(%r11)
	vmovdqa	$H1,0x10(%r11)
	vmovdqa	$H3,0x30(%r11)
	vmovdqa	$H4,0x40(%r11)

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpshufd	\$0x10,`16*1-64`($ctx),$H2	# r1^n
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vpmuludq	$T3,$H2,$H0	# h3*r1
	vpaddq	$H0,$D4,$D4		# d4 += h3*r1
	vpshufd	\$0x10,`16*2-64`($ctx),$H3	# s1^n
	vpmuludq	$T2,$H2,$H1	# h2*r1
	vpaddq	$H1,$D3,$D3		# d3 += h2*r1
	vpshufd	\$0x10,`16*3-64`($ctx),$H4	# r2^n
	vpmuludq	$T1,$H2,$H0	# h1*r1
	vpaddq	$H0,$D2,$D2		# d2 += h1*r1
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpaddq	$H2,$D1,$D1		# d1 += h0*r1
	vpmuludq	$T4,$H3,$H3	# h4*s1
	vpaddq	$H3,$D0,$D0		# d0 += h4*s1

	vpshufd	\$0x10,`16*4-64`($ctx),$H2	# s2^n
	vpmuludq	$T2,$H4,$H1	# h2*r2
	vpaddq	$H1,$D4,$D4		# d4 += h2*r2
	vpmuludq	$T1,$H4,$H0	# h1*r2
	vpaddq	$H0,$D3,$D3		# d3 += h1*r2
	vpshufd	\$0x10,`16*5-64`($ctx),$H3	# r3^n
	vpmuludq	$T0,$H4,$H4	# h0*r2
	vpaddq	$H4,$D2,$D2		# d2 += h0*r2
	vpmuludq	$T4,$H2,$H1	# h4*s2
	vpaddq	$H1,$D1,$D1		# d1 += h4*s2
	vpshufd	\$0x10,`16*6-64`($ctx),$H4	# s3^n
	vpmuludq	$T3,$H2,$H2	# h3*s2
	vpaddq	$H2,$D0,$D0		# d0 += h3*s2

	vpmuludq	$T1,$H3,$H0	# h1*r3
	vpaddq	$H0,$D4,$D4		# d4 += h1*r3
	vpmuludq	$T0,$H3,$H3	# h0*r3
	vpaddq	$H3,$D3,$D3		# d3 += h0*r3
	vpshufd	\$0x10,`16*7-64`($ctx),$H2	# r4^n
	vpmuludq	$T4,$H4,$H1	# h4*s3
	vpaddq	$H1,$D2,$D2		# d2 += h4*s3
	vpshufd	\$0x10,`16*8-64`($ctx),$H3	# s4^n
	vpmuludq	$T3,$H4,$H0	# h3*s3
	vpaddq	$H0,$D1,$D1		# d1 += h3*s3
	vpmuludq	$T2,$H4,$H4	# h2*s3
	vpaddq	$H4,$D0,$D0		# d0 += h2*s3

	vpmuludq	$T0,$H2,$H2	# h0*r4
	vpaddq	$H2,$D4,$D4		# h4 = d4 + h0*r4
	vpmuludq	$T4,$H3,$H1	# h4*s4
	vpaddq	$H1,$D3,$D3		# h3 = d3 + h4*s4
	vpmuludq	$T3,$H3,$H0	# h3*s4
	vpaddq	$H0,$D2,$D2		# h2 = d2 + h3*s4
	vpmuludq	$T2,$H3,$H1	# h2*s4
	vpaddq	$H1,$D1,$D1		# h1 = d1 + h2*s4
	vpmuludq	$T1,$H3,$H3	# h1*s4
	vpaddq	$H3,$D0,$D0		# h0 = d0 + h1*s4

	vmovdqu	16*0($inp),$H0		# load input
	vmovdqu	16*1($inp),$H1

	vpsrldq	\$6,$H0,$H2		# splat input
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	vpsrlq	\$40,$H4,$H4		# 4
	vpand	$MASK,$H0,$H0		# 0
	vpand	$MASK,$H1,$H1		# 1
	vpand	$MASK,$H2,$H2		# 2
	vpand	$MASK,$H3,$H3		# 3
	vpor	32(%rcx),$H4,$H4	# padbit, yes, always

	vpshufd	\$0x32,`16*0-64`($ctx),$T4	# r0^n, 34xx -> x3x4
	vpaddq	0x00(%r11),$H0,$H0
	vpaddq	0x10(%r11),$H1,$H1
	vpaddq	0x20(%r11),$H2,$H2
	vpaddq	0x30(%r11),$H3,$H3
	vpaddq	0x40(%r11),$H4,$H4

	################################################################
	# multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpaddq	$T0,$D0,$D0		# d0 += h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vpaddq	$T1,$D1,$D1		# d1 += h1*r0
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpaddq	$T0,$D2,$D2		# d2 += h2*r0
	vpshufd	\$0x32,`16*1-64`($ctx),$T2	# r1^n
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpaddq	$T1,$D3,$D3		# d3 += h3*r0
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpaddq	$T4,$D4,$D4		# d4 += h4*r0

	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq	$T0,$D4,$D4		# d4 += h3*r1
	vpshufd	\$0x32,`16*2-64`($ctx),$T3	# s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpaddq	$T1,$D3,$D3		# d3 += h2*r1
	vpshufd	\$0x32,`16*3-64`($ctx),$T4	# r2
	vpmuludq	$H1,$T2,$T0	# h1*r1
	vpaddq	$T0,$D2,$D2		# d2 += h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq	$T2,$D1,$D1		# d1 += h0*r1
	vpmuludq	$H4,$T3,$T3	# h4*s1
	vpaddq	$T3,$D0,$D0		# d0 += h4*s1

	vpshufd	\$0x32,`16*4-64`($ctx),$T2	# s2
	vpmuludq	$H2,$T4,$T1	# h2*r2
	vpaddq	$T1,$D4,$D4		# d4 += h2*r2
	vpmuludq	$H1,$T4,$T0	# h1*r2
	vpaddq	$T0,$D3,$D3		# d3 += h1*r2
	vpshufd	\$0x32,`16*5-64`($ctx),$T3	# r3
	vpmuludq	$H0,$T4,$T4	# h0*r2
	vpaddq	$T4,$D2,$D2		# d2 += h0*r2
	vpmuludq	$H4,$T2,$T1	# h4*s2
	vpaddq	$T1,$D1,$D1		# d1 += h4*s2
	vpshufd	\$0x32,`16*6-64`($ctx),$T4	# s3
	vpmuludq	$H3,$T2,$T2	# h3*s2
	vpaddq	$T2,$D0,$D0		# d0 += h3*s2

	vpmuludq	$H1,$T3,$T0	# h1*r3
	vpaddq	$T0,$D4,$D4		# d4 += h1*r3
	vpmuludq	$H0,$T3,$T3	# h0*r3
	vpaddq	$T3,$D3,$D3		# d3 += h0*r3
	vpshufd	\$0x32,`16*7-64`($ctx),$T2	# r4
	vpmuludq	$H4,$T4,$T1	# h4*s3
	vpaddq	$T1,$D2,$D2		# d2 += h4*s3
	vpshufd	\$0x32,`16*8-64`($ctx),$T3	# s4
	vpmuludq	$H3,$T4,$T0	# h3*s3
	vpaddq	$T0,$D1,$D1		# d1 += h3*s3
	vpmuludq	$H2,$T4,$T4	# h2*s3
	vpaddq	$T4,$D0,$D0		# d0 += h2*s3

	vpmuludq	$H0,$T2,$T2	# h0*r4
	vpaddq	$T2,$D4,$D4		# d4 += h0*r4
	vpmuludq	$H4,$T3,$T1	# h4*s4
	vpaddq	$T1,$D3,$D3		# d3 += h4*s4
	vpmuludq	$H3,$T3,$T0	# h3*s4
	vpaddq	$T0,$D2,$D2		# d2 += h3*s4
	vpmuludq	$H2,$T3,$T1	# h2*s4
	vpaddq	$T1,$D1,$D1		# d1 += h2*s4
	vpmuludq	$H1,$T3,$T3	# h1*s4
	vpaddq	$T3,$D0,$D0		# d0 += h1*s4

	################################################################
	# horizontal addition
	################################################################

	vpaddq	$H3,$D4,$D4		# h3 -> h4
	vpaddq	$H0,$D1,$D1		# h0 -> h1
	vpaddq	$H1,$D2,$D2		# h1 -> h2
	vpaddq	$H4,$D0,$D0		# h4 -> h0
	vpaddq	$H2,$D3,$D3		# h2 -> h3
	vpaddq	$H0,$D1,$D1		# h0 -> h1
	vpaddq	$H3,$D4,$D4		# h3 -> h4

	vmovd	$D0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	$D1,`4*1-48-64`($ctx)
	vmovd	$D2,`4*2-48-64`($ctx)
	vmovd	$D3,`4*3-48-64`($ctx)
	vmovd	$D4,`4*4-48-64`($ctx)

$code.=<<___ if ($win64);
	vmovdqa	0x50(%r11),%xmm6
	vmovdqa	0x60(%r11),%xmm7
	vmovdqa	0x70(%r11),%xmm8
	vmovdqa	0x80(%r11),%xmm9
	vmovdqa	0x90(%r11),%xmm10
	vmovdqa	0xa0(%r11),%xmm11
	vmovdqa	0xb0(%r11),%xmm12
	vmovdqa	0xc0(%r11),%xmm13
	vmovdqa	0xd0(%r11),%xmm14
	vmovdqa	0xe0(%r11),%xmm15

$code.=<<___ if (!$win64);
.cfi_def_cfa_register	%rsp

&end_function("poly1305_blocks_avx");

&declare_function("poly1305_emit_avx", 32, 3);
	cmpl	\$0,20($ctx)		# is_base2_26?
	mov	0($ctx),%eax		# load hash value base 2^26
	shl	\$26,%rcx		# base 2^26 -> base 2^64
	mov	%r10,%rax		# could be partially reduced, so reduce
	add	\$5,%r8			# compare to modulus
	shr	\$2,%r10		# did 130-bit value overflow?
	add	0($nonce),%rax		# accumulate nonce
	mov	%rax,0($mac)		# write result
&end_function("poly1305_emit_avx");

$code .= "#endif\n";

$code .= "#ifdef CONFIG_AS_AVX2\n";

my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
    map("%ymm$_",(0..15));

sub poly1305_blocks_avxN {
my $suffix = $avx512 ? "_avx512" : "";
	mov	20($ctx),%r8d		# is_base2_26
	jae	.Lblocks_avx2$suffix

.Lblocks_avx2$suffix:
	jz	.Lno_data_avx2$suffix
	jz	.Lbase2_64_avx2$suffix
	jz	.Leven_avx2$suffix

.Lblocks_avx2_body$suffix:
	mov	$len,%r15		# reassign $len
	mov	0($ctx),$d1		# load hash value
	mov	24($ctx),$r0		# load r

	################################# base 2^26 -> base 2^64
	and	\$`-1*(1<<31)`,$d1
	mov	$d2,$r1			# borrow $r1
	and	\$`-1*(1<<31)`,$d2
	adc	\$0,$h2			# can be partially reduced...
	mov	\$-4,$d2		# ... so reduce
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

.Lbase2_26_pre_avx2$suffix:
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	jnz	.Lbase2_26_pre_avx2$suffix

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx2$suffix	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	jz	.Lstore_base2_26_avx2$suffix
	jmp	.Lproceed_avx2$suffix

.Lstore_base2_64_avx2$suffix:
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed
	jmp	.Ldone_avx2$suffix

.Lstore_base2_26_avx2$suffix:
	mov	%rax#d,0($ctx)		# store hash value base 2^26

.Lno_data_avx2$suffix:
.Lblocks_avx2_epilogue$suffix:

.Lbase2_64_avx2$suffix:
.Lbase2_64_avx2_body$suffix:
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	jz	.Linit_avx2$suffix

.Lbase2_64_pre_avx2$suffix:
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	jnz	.Lbase2_64_pre_avx2$suffix

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	movl	\$1,20($ctx)		# set is_base2_26
	call	__poly1305_init_avx

.Lproceed_avx2$suffix:
	mov	%r15,$len		# restore $len

$code.=<<___ if (!$kernel);
	mov	OPENSSL_ia32cap_P+8(%rip),%r9d
	mov	\$`(1<<31|1<<30|1<<16)`,%r11d

.Lbase2_64_avx2_epilogue$suffix:
	jmp	.Ldo_avx2$suffix

$code.=<<___ if (!$kernel);
	mov	OPENSSL_ia32cap_P+8(%rip),%r9d

	vmovd	4*0($ctx),%x#$H0	# load hash value base 2^26
	vmovd	4*1($ctx),%x#$H1
	vmovd	4*2($ctx),%x#$H2
	vmovd	4*3($ctx),%x#$H3
	vmovd	4*4($ctx),%x#$H4

$code.=<<___ if (!$kernel && $avx>2);
	test	\$`1<<16`,%r9d		# check for AVX512F

.Lskip_avx512$suffix:

$code.=<<___ if ($avx > 2 && $avx512 && $kernel);

$code.=<<___ if (!$win64);
.cfi_def_cfa_register	%r10

$code.=<<___ if ($win64);
	vmovdqa	%xmm6,-0xb0(%r10)
	vmovdqa	%xmm7,-0xa0(%r10)
	vmovdqa	%xmm8,-0x90(%r10)
	vmovdqa	%xmm9,-0x80(%r10)
	vmovdqa	%xmm10,-0x70(%r10)
	vmovdqa	%xmm11,-0x60(%r10)
	vmovdqa	%xmm12,-0x50(%r10)
	vmovdqa	%xmm13,-0x40(%r10)
	vmovdqa	%xmm14,-0x30(%r10)
	vmovdqa	%xmm15,-0x20(%r10)

.Ldo_avx2_body$suffix:
	lea	.Lconst(%rip),%rcx
	lea	48+64($ctx),$ctx	# size optimization
	vmovdqa	96(%rcx),$T0		# .Lpermd_avx2

	# expand and copy pre-calculated table to stack
	vmovdqu	`16*0-64`($ctx),%x#$T2
	vmovdqu	`16*1-64`($ctx),%x#$T3
	vmovdqu	`16*2-64`($ctx),%x#$T4
	vmovdqu	`16*3-64`($ctx),%x#$D0
	vmovdqu	`16*4-64`($ctx),%x#$D1
	vmovdqu	`16*5-64`($ctx),%x#$D2
	lea	0x90(%rsp),%rax		# size optimization
	vmovdqu	`16*6-64`($ctx),%x#$D3
	vpermd	$T2,$T0,$T2		# 00003412 -> 14243444
	vmovdqu	`16*7-64`($ctx),%x#$D4
	vmovdqu	`16*8-64`($ctx),%x#$MASK
	vmovdqa	$T2,0x00(%rsp)
	vmovdqa	$T3,0x20-0x90(%rax)
	vmovdqa	$T4,0x40-0x90(%rax)
	vmovdqa	$D0,0x60-0x90(%rax)
	vmovdqa	$D1,0x80-0x90(%rax)
	vmovdqa	$D2,0xa0-0x90(%rax)
	vpermd	$MASK,$T0,$MASK
	vmovdqa	$D3,0xc0-0x90(%rax)
	vmovdqa	$D4,0xe0-0x90(%rax)
	vmovdqa	$MASK,0x100-0x90(%rax)
	vmovdqa	64(%rcx),$MASK		# .Lmask26

	################################################################
	vmovdqu	16*0($inp),%x#$T0
	vmovdqu	16*1($inp),%x#$T1
	vinserti128	\$1,16*2($inp),$T0,$T0
	vinserti128	\$1,16*3($inp),$T1,$T1

	vpsrldq	\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T3,$T2,$T2	# 2:3
	vpunpcklqdq	$T1,$T0,$T0	# 0:1

	vpsrlq	\$40,$T4,$T4		# 4
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	vpaddq	$H2,$T2,$H2		# accumulate input
	jz	.Ltail_avx2$suffix
	jmp	.Loop_avx2$suffix

	################################################################
	# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
	# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
	# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
	# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
	#	\________/\__________/
	################################################################
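	# Hedged aside (exposition only): four-lane Horner. Each
	# .Loop_avx2 iteration folds in four fresh blocks and multiplies
	# every lane by r^4; the last iteration instead multiplies lane j
	# (j = 0..3) by r^(4-j), so the concluding horizontal addition
	# reproduces the scalar m1*r^n + m2*r^(n-1) + ... + mn*r.
	################################################################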
	#vpaddq	$H2,$T2,$H2		# accumulate input
	vmovdqa	`32*0`(%rsp),$T0	# r0^4
	vmovdqa	`32*1`(%rsp),$T1	# r1^4
	vmovdqa	`32*3`(%rsp),$T2	# r2^4
	vmovdqa	`32*6-0x90`(%rax),$T3	# s3^4
	vmovdqa	`32*8-0x90`(%rax),$S4	# s4^4

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# however, as h2 is "chronologically" first one available pull
	# corresponding operations up, so it's
	#
	# d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
	# d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
	# d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4

	vpmuludq	$H2,$T0,$D2	# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4	# h0*r1
	vpmuludq	$H1,$T1,$H2	# h1*r1, borrow $H2 as temp
	vpaddq	$T4,$D1,$D1		# d1 += h0*r1
	vpaddq	$H2,$D2,$D2		# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4	# h3*r1
	vpmuludq	`32*2`(%rsp),$H4,$H2	# h4*s1
	vpaddq	$T4,$D4,$D4		# d4 += h3*r1
	vpaddq	$H2,$D0,$D0		# d0 += h4*s1
	vmovdqa	`32*4-0x90`(%rax),$T1	# s2

	vpmuludq	$H0,$T0,$T4	# h0*r0
	vpmuludq	$H1,$T0,$H2	# h1*r0
	vpaddq	$T4,$D0,$D0		# d0 += h0*r0
	vpaddq	$H2,$D1,$D1		# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4	# h3*r0
	vpmuludq	$H4,$T0,$H2	# h4*r0
	vmovdqu	16*0($inp),%x#$T0	# load input
	vpaddq	$T4,$D3,$D3		# d3 += h3*r0
	vpaddq	$H2,$D4,$D4		# d4 += h4*r0
	vinserti128	\$1,16*2($inp),$T0,$T0

	vpmuludq	$H3,$T1,$T4	# h3*s2
	vpmuludq	$H4,$T1,$H2	# h4*s2
	vmovdqu	16*1($inp),%x#$T1
	vpaddq	$T4,$D0,$D0		# d0 += h3*s2
	vpaddq	$H2,$D1,$D1		# d1 += h4*s2
	vmovdqa	`32*5-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4	# h1*r2
	vpmuludq	$H0,$T2,$T2	# h0*r2
	vpaddq	$T4,$D3,$D3		# d3 += h1*r2
	vpaddq	$T2,$D2,$D2		# d2 += h0*r2
	vinserti128	\$1,16*3($inp),$T1,$T1

	vpmuludq	$H1,$H2,$T4	# h1*r3
	vpmuludq	$H0,$H2,$H2	# h0*r3
	vpsrldq	\$6,$T0,$T2		# splat input
	vpaddq	$T4,$D4,$D4		# d4 += h1*r3
	vpaddq	$H2,$D3,$D3		# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4	# h3*s3
	vpmuludq	$H4,$T3,$H2	# h4*s3
	vpaddq	$T4,$D1,$D1		# d1 += h3*s3
	vpaddq	$H2,$D2,$D2		# d2 += h4*s3
	vpunpckhqdq	$T1,$T0,$T4	# 4

	vpmuludq	$H3,$S4,$H3	# h3*s4
	vpmuludq	$H4,$S4,$H4	# h4*s4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpaddq	$H3,$D2,$H2		# h2 = d2 + h3*r4
	vpaddq	$H4,$D3,$H3		# h3 = d3 + h4*r4
	vpunpcklqdq	$T3,$T2,$T3	# 2:3
	vpmuludq	`32*7-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0	# h1*s4
	vmovdqa	64(%rcx),$MASK		# .Lmask26
	vpaddq	$H4,$D4,$H4		# h4 = d4 + h0*r4
	vpaddq	$H0,$D0,$H0		# h0 = d0 + h1*s4

	################################################################
	# lazy reduction (interleaved with tail of input splat)

	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpaddq	$D0,$D1,$H1		# h0 -> h1
	vpaddq	$D1,$H2,$H2		# h1 -> h2
	vpaddq	$D4,$H0,$H0		# h4 -> h0
	vpand	$MASK,$T2,$T2		# 2
	vpaddq	$D2,$H3,$H3		# h2 -> h3
	vpaddq	$T2,$H2,$H2		# modulo-scheduled
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpsrlq	\$40,$T4,$T4		# 4
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	jnz	.Loop_avx2$suffix

	################################################################
	# while above multiplications were by r^4 in all lanes, in last
	# iteration we multiply least significant lane by r^4 and most
	# significant one by r, so copy of above except that references
	# to the precomputed table are displaced by 4...

	#vpaddq	$H2,$T2,$H2		# accumulate input
	vmovdqu	`32*0+4`(%rsp),$T0	# r0^4
	vmovdqu	`32*1+4`(%rsp),$T1	# r1^4
	vmovdqu	`32*3+4`(%rsp),$T2	# r2^4
	vmovdqu	`32*6+4-0x90`(%rax),$T3	# s3^4
	vmovdqu	`32*8+4-0x90`(%rax),$S4	# s4^4

	vpmuludq	$H2,$T0,$D2	# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4	# h0*r1
	vpmuludq	$H1,$T1,$H2	# h1*r1
	vpaddq	$T4,$D1,$D1		# d1 += h0*r1
	vpaddq	$H2,$D2,$D2		# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4	# h3*r1
	vpmuludq	`32*2+4`(%rsp),$H4,$H2	# h4*s1
	vpaddq	$T4,$D4,$D4		# d4 += h3*r1
	vpaddq	$H2,$D0,$D0		# d0 += h4*s1

	vpmuludq	$H0,$T0,$T4	# h0*r0
	vpmuludq	$H1,$T0,$H2	# h1*r0
	vpaddq	$T4,$D0,$D0		# d0 += h0*r0
	vmovdqu	`32*4+4-0x90`(%rax),$T1	# s2
	vpaddq	$H2,$D1,$D1		# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4	# h3*r0
	vpmuludq	$H4,$T0,$H2	# h4*r0
	vpaddq	$T4,$D3,$D3		# d3 += h3*r0
	vpaddq	$H2,$D4,$D4		# d4 += h4*r0

	vpmuludq	$H3,$T1,$T4	# h3*s2
	vpmuludq	$H4,$T1,$H2	# h4*s2
	vpaddq	$T4,$D0,$D0		# d0 += h3*s2
	vpaddq	$H2,$D1,$D1		# d1 += h4*s2
	vmovdqu	`32*5+4-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4	# h1*r2
	vpmuludq	$H0,$T2,$T2	# h0*r2
	vpaddq	$T4,$D3,$D3		# d3 += h1*r2
	vpaddq	$T2,$D2,$D2		# d2 += h0*r2

	vpmuludq	$H1,$H2,$T4	# h1*r3
	vpmuludq	$H0,$H2,$H2	# h0*r3
	vpaddq	$T4,$D4,$D4		# d4 += h1*r3
	vpaddq	$H2,$D3,$D3		# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4	# h3*s3
	vpmuludq	$H4,$T3,$H2	# h4*s3
	vpaddq	$T4,$D1,$D1		# d1 += h3*s3
	vpaddq	$H2,$D2,$D2		# d2 += h4*s3

	vpmuludq	$H3,$S4,$H3	# h3*s4
	vpmuludq	$H4,$S4,$H4	# h4*s4
	vpaddq	$H3,$D2,$H2		# h2 = d2 + h3*r4
	vpaddq	$H4,$D3,$H3		# h3 = d3 + h4*r4
	vpmuludq	`32*7+4-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0	# h1*s4
	vmovdqa	64(%rcx),$MASK		# .Lmask26
	vpaddq	$H4,$D4,$H4		# h4 = d4 + h0*r4
	vpaddq	$H0,$D0,$H0		# h0 = d0 + h1*s4

	################################################################
	# horizontal addition

	vpermq	\$0x2,$H3,$T3
	vpermq	\$0x2,$H4,$T4
	vpermq	\$0x2,$H0,$T0
	vpermq	\$0x2,$D1,$T1
	vpermq	\$0x2,$H2,$T2

	################################################################
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpaddq	$D0,$D1,$H1		# h0 -> h1
	vpaddq	$D1,$H2,$H2		# h1 -> h2
	vpaddq	$D4,$H0,$H0		# h4 -> h0
	vpaddq	$D2,$H3,$H3		# h2 -> h3
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vmovd	%x#$H0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	%x#$H1,`4*1-48-64`($ctx)
	vmovd	%x#$H2,`4*2-48-64`($ctx)
	vmovd	%x#$H3,`4*3-48-64`($ctx)
	vmovd	%x#$H4,`4*4-48-64`($ctx)

$code.=<<___ if ($win64);
	vmovdqa	-0xb0(%r10),%xmm6
	vmovdqa	-0xa0(%r10),%xmm7
	vmovdqa	-0x90(%r10),%xmm8
	vmovdqa	-0x80(%r10),%xmm9
	vmovdqa	-0x70(%r10),%xmm10
	vmovdqa	-0x60(%r10),%xmm11
	vmovdqa	-0x50(%r10),%xmm12
	vmovdqa	-0x40(%r10),%xmm13
	vmovdqa	-0x30(%r10),%xmm14
	vmovdqa	-0x20(%r10),%xmm15

.Ldo_avx2_epilogue$suffix:

$code.=<<___ if (!$win64);
.cfi_def_cfa_register	%rsp

if($avx > 2 && $avx512) {
my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29));
my $PADBIT="%zmm30";

map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));	# switch to %zmm domain
map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
map(s/%y/%z/,($MASK));

$code.=<<___ if (!$win64);
.cfi_def_cfa_register	%r10

$code.=<<___ if ($win64);
	vmovdqa	%xmm6,-0xb0(%r10)
	vmovdqa	%xmm7,-0xa0(%r10)
	vmovdqa	%xmm8,-0x90(%r10)
	vmovdqa	%xmm9,-0x80(%r10)
	vmovdqa	%xmm10,-0x70(%r10)
	vmovdqa	%xmm11,-0x60(%r10)
	vmovdqa	%xmm12,-0x50(%r10)
	vmovdqa	%xmm13,-0x40(%r10)
	vmovdqa	%xmm14,-0x30(%r10)
	vmovdqa	%xmm15,-0x20(%r10)

	lea	.Lconst(%rip),%rcx
	lea	48+64($ctx),$ctx	# size optimization
	vmovdqa	96(%rcx),%y#$T2		# .Lpermd_avx2

	# expand pre-calculated table
	vmovdqu	`16*0-64`($ctx),%x#$D0	# will become expanded ${R0}
	vmovdqu	`16*1-64`($ctx),%x#$D1	# will become ... ${R1}
	vmovdqu	`16*2-64`($ctx),%x#$T0	# ... ${S1}
	vmovdqu	`16*3-64`($ctx),%x#$D2	# ... ${R2}
	vmovdqu	`16*4-64`($ctx),%x#$T1	# ... ${S2}
	vmovdqu	`16*5-64`($ctx),%x#$D3	# ... ${R3}
	vmovdqu	`16*6-64`($ctx),%x#$T3	# ... ${S3}
	vmovdqu	`16*7-64`($ctx),%x#$D4	# ... ${R4}
	vmovdqu	`16*8-64`($ctx),%x#$T4	# ... ${S4}
	vpermd	$D0,$T2,$R0		# 00003412 -> 14243444
	vpbroadcastq	64(%rcx),$MASK	# .Lmask26

	vmovdqa64	$R0,0x00(%rsp){%k2}	# save in case $len%128 != 0
	vpsrlq	\$32,$R0,$T0		# 14243444 -> 01020304
	vmovdqu64	$R1,0x00(%rsp,%rax){%k2}
	vmovdqa64	$S1,0x40(%rsp){%k2}
	vmovdqu64	$R2,0x40(%rsp,%rax){%k2}
	vmovdqa64	$S2,0x80(%rsp){%k2}
	vmovdqu64	$R3,0x80(%rsp,%rax){%k2}
	vmovdqa64	$S3,0xc0(%rsp){%k2}
	vmovdqu64	$R4,0xc0(%rsp,%rax){%k2}
	vmovdqa64	$S4,0x100(%rsp){%k2}

	################################################################
	# calculate 5th through 8th powers of the key
	#
	# d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
	# d1 = r0'*r1 + r1'*r0   + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
	# d2 = r0'*r2 + r1'*r1   + r2'*r0   + r3'*5*r4 + r4'*5*r3
	# d3 = r0'*r3 + r1'*r2   + r2'*r1   + r3'*r0   + r4'*5*r4
	# d4 = r0'*r4 + r1'*r3   + r2'*r2   + r3'*r1   + r4'*r0
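	#
	# Hedged aside (exposition only): powers 5 through 8 fall out of
	# a single vector multiplication -- lane-wise (r^1,r^2,r^3,r^4)
	# times the broadcast r^4 yields (r^5,r^6,r^7,r^8) -- reusing the
	# same 5-digit schoolbook multiply-and-reduce pattern as the main
	# loop, so no extra scalar code is required.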
	vpmuludq	$T0,$R0,$D0	# d0 = r0'*r0
	vpmuludq	$T0,$R1,$D1	# d1 = r0'*r1
	vpmuludq	$T0,$R2,$D2	# d2 = r0'*r2
	vpmuludq	$T0,$R3,$D3	# d3 = r0'*r3
	vpmuludq	$T0,$R4,$D4	# d4 = r0'*r4

	vpmuludq	$T1,$S4,$M0
	vpmuludq	$T1,$R0,$M1
	vpmuludq	$T1,$R1,$M2
	vpmuludq	$T1,$R2,$M3
	vpmuludq	$T1,$R3,$M4
	vpaddq	$M0,$D0,$D0		# d0 += r1'*5*r4
	vpaddq	$M1,$D1,$D1		# d1 += r1'*r0
	vpaddq	$M2,$D2,$D2		# d2 += r1'*r1
	vpaddq	$M3,$D3,$D3		# d3 += r1'*r2
	vpaddq	$M4,$D4,$D4		# d4 += r1'*r3

	vpmuludq	$T2,$S3,$M0
	vpmuludq	$T2,$S4,$M1
	vpmuludq	$T2,$R1,$M3
	vpmuludq	$T2,$R2,$M4
	vpmuludq	$T2,$R0,$M2
	vpaddq	$M0,$D0,$D0		# d0 += r2'*5*r3
	vpaddq	$M1,$D1,$D1		# d1 += r2'*5*r4
	vpaddq	$M3,$D3,$D3		# d3 += r2'*r1
	vpaddq	$M4,$D4,$D4		# d4 += r2'*r2
	vpaddq	$M2,$D2,$D2		# d2 += r2'*r0

	vpmuludq	$T3,$S2,$M0
	vpmuludq	$T3,$R0,$M3
	vpmuludq	$T3,$R1,$M4
	vpmuludq	$T3,$S3,$M1
	vpmuludq	$T3,$S4,$M2
	vpaddq	$M0,$D0,$D0		# d0 += r3'*5*r2
	vpaddq	$M3,$D3,$D3		# d3 += r3'*r0
	vpaddq	$M4,$D4,$D4		# d4 += r3'*r1
	vpaddq	$M1,$D1,$D1		# d1 += r3'*5*r3
	vpaddq	$M2,$D2,$D2		# d2 += r3'*5*r4

	vpmuludq	$T4,$S4,$M3
	vpmuludq	$T4,$R0,$M4
	vpmuludq	$T4,$S1,$M0
	vpmuludq	$T4,$S2,$M1
	vpmuludq	$T4,$S3,$M2
	vpaddq	$M3,$D3,$D3		# d3 += r4'*5*r4
	vpaddq	$M4,$D4,$D4		# d4 += r4'*r0
	vpaddq	$M0,$D0,$D0		# d0 += r4'*5*r1
	vpaddq	$M1,$D1,$D1		# d1 += r4'*5*r2
	vpaddq	$M2,$D2,$D2		# d2 += r4'*5*r3

	################################################################
	vmovdqu64	16*0($inp),%z#$T3
	vmovdqu64	16*4($inp),%z#$T4

	################################################################
	vpandq	$MASK,$D3,$D3
	vpaddq	$M3,$D4,$D4		# d3 -> d4
	vpandq	$MASK,$D0,$D0
	vpaddq	$M0,$D1,$D1		# d0 -> d1
	vpandq	$MASK,$D4,$D4
	vpandq	$MASK,$D1,$D1
	vpaddq	$M1,$D2,$D2		# d1 -> d2
	vpaddq	$M4,$D0,$D0		# d4 -> d0
	vpandq	$MASK,$D2,$D2
	vpaddq	$M2,$D3,$D3		# d2 -> d3
	vpandq	$MASK,$D0,$D0
	vpaddq	$M0,$D1,$D1		# d0 -> d1
	vpandq	$MASK,$D3,$D3
	vpaddq	$M3,$D4,$D4		# d3 -> d4

	################################################################
	# at this point we have 14243444 in $R0-$S4 and 05060708 in
	# $D0-$D4, ...

	vpunpcklqdq	$T4,$T3,$T0	# transpose input
	vpunpckhqdq	$T4,$T3,$T4

	# ... since input 64-bit lanes are ordered as 73625140, we could
	# "vperm" it to 76543210 (here and in each loop iteration), *or*
	# we could just flow along, hence the goal for $R0-$S4 is
	# 1858286838784888 ...

	vmovdqa32	128(%rcx),$M0		# .Lpermd_avx512:
	vpermd	$R0,$M0,$R0		# 14243444 -> 1---2---3---4---
	vpermd	$D0,$M0,${R0}{%k1}	# 05060708 -> 1858286838784888
	vpermd	$D1,$M0,${R1}{%k1}
	vpermd	$D2,$M0,${R2}{%k1}
	vpermd	$D3,$M0,${R3}{%k1}
	vpermd	$D4,$M0,${R4}{%k1}

	vpslld	\$2,$R1,$S1		# *5
	vpbroadcastq	32(%rcx),$PADBIT	# .L129

	vpsrlq	\$52,$T0,$T2		# splat input
	vpsrlq	\$40,$T4,$T4		# 4
	vpandq	$MASK,$T2,$T2		# 2
	vpandq	$MASK,$T0,$T0		# 0
	#vpandq	$MASK,$T1,$T1		# 1
	#vpandq	$MASK,$T3,$T3		# 3
	#vporq	$PADBIT,$T4,$T4		# padbit, yes, always

	vpaddq	$H2,$T2,$H2		# accumulate input

	################################################################
	# ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
	# ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
	# ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
	# ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
	# ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
	# ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
	# ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
	# ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
	#	\________/\___________/
	################################################################
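	# Hedged aside (exposition only): the same lane trick as in the
	# AVX2 path, widened to eight blocks: lane j accumulates blocks
	# j, j+8, j+16, ..., each iteration scales all lanes by r^8, and
	# the final iteration scales lane j by r^(8-j) so the horizontal
	# sum equals the scalar Horner evaluation.
	################################################################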
	#vpaddq	$H2,$T2,$H2		# accumulate input

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# however, as h2 is "chronologically" first one available pull
	# corresponding operations up, so it's
	#
	# d3 = h2*r1 + h0*r3 + h1*r2   + h3*r0 + h4*5*r4
	# d4 = h2*r2 + h0*r4 + h1*r3   + h3*r1 + h4*r0
	# d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
	# d1 = h2*5*r4 + h0*r1 + h1*r0   + h3*5*r3 + h4*5*r2
	# d2 = h2*r0   + h0*r2 + h1*r1   + h3*5*r4 + h4*5*r3

	vpmuludq	$H2,$R1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$R2,$D4	# d4 = h2*r2
	vpandq	$MASK,$T1,$T1		# 1
	vpmuludq	$H2,$S3,$D0	# d0 = h2*s3
	vpandq	$MASK,$T3,$T3		# 3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4
	vporq	$PADBIT,$T4,$T4		# padbit, yes, always
	vpmuludq	$H2,$R0,$D2	# d2 = h2*r0
	vpaddq	$H1,$T1,$H1		# accumulate input

	vmovdqu64	16*0($inp),$T3	# load input
	vmovdqu64	16*4($inp),$T4

	vpmuludq	$H0,$R3,$M3
	vpmuludq	$H0,$R4,$M4
	vpmuludq	$H0,$R0,$M0
	vpmuludq	$H0,$R1,$M1
	vpaddq	$M3,$D3,$D3		# d3 += h0*r3
	vpaddq	$M4,$D4,$D4		# d4 += h0*r4
	vpaddq	$M0,$D0,$D0		# d0 += h0*r0
	vpaddq	$M1,$D1,$D1		# d1 += h0*r1

	vpmuludq	$H1,$R2,$M3
	vpmuludq	$H1,$R3,$M4
	vpmuludq	$H1,$S4,$M0
	vpmuludq	$H0,$R2,$M2
	vpaddq	$M3,$D3,$D3		# d3 += h1*r2
	vpaddq	$M4,$D4,$D4		# d4 += h1*r3
	vpaddq	$M0,$D0,$D0		# d0 += h1*s4
	vpaddq	$M2,$D2,$D2		# d2 += h0*r2

	vpunpcklqdq	$T4,$T3,$T0	# transpose input
	vpunpckhqdq	$T4,$T3,$T4

	vpmuludq	$H3,$R0,$M3
	vpmuludq	$H3,$R1,$M4
	vpmuludq	$H1,$R0,$M1
	vpmuludq	$H1,$R1,$M2
	vpaddq	$M3,$D3,$D3		# d3 += h3*r0
	vpaddq	$M4,$D4,$D4		# d4 += h3*r1
	vpaddq	$M1,$D1,$D1		# d1 += h1*r0
	vpaddq	$M2,$D2,$D2		# d2 += h1*r1

	vpmuludq	$H4,$S4,$M3
	vpmuludq	$H4,$R0,$M4
	vpmuludq	$H3,$S2,$M0
	vpmuludq	$H3,$S3,$M1
	vpaddq	$M3,$D3,$D3		# d3 += h4*s4
	vpmuludq	$H3,$S4,$M2
	vpaddq	$M4,$D4,$D4		# d4 += h4*r0
	vpaddq	$M0,$D0,$D0		# d0 += h3*s2
	vpaddq	$M1,$D1,$D1		# d1 += h3*s3
	vpaddq	$M2,$D2,$D2		# d2 += h3*s4

	vpmuludq	$H4,$S1,$M0
	vpmuludq	$H4,$S2,$M1
	vpmuludq	$H4,$S3,$M2
	vpaddq	$M0,$D0,$H0		# h0 = d0 + h4*s1
	vpaddq	$M1,$D1,$H1		# h1 = d1 + h4*s2
	vpaddq	$M2,$D2,$H2		# h2 = d2 + h4*s3

	################################################################
	# lazy reduction (interleaved with input splat)

	vpsrlq	\$52,$T0,$T2		# splat input
	vpandq	$MASK,$D3,$D3
	vpaddq	$H3,$D4,$H4		# h3 -> h4
	vpandq	$MASK,$H0,$H0
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpandq	$MASK,$T2,$T2		# 2
	vpandq	$MASK,$H4,$H4
	vpandq	$MASK,$H1,$H1
	vpaddq	$D1,$H2,$H2		# h1 -> h2
	vpaddq	$D4,$H0,$H0		# h4 -> h0
	vpaddq	$T2,$H2,$H2		# modulo-scheduled
	vpandq	$MASK,$H2,$H2
	vpaddq	$D2,$D3,$H3		# h2 -> h3
	vpandq	$MASK,$H0,$H0
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpsrlq	\$40,$T4,$T4		# 4
	vpandq	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpandq	$MASK,$T0,$T0		# 0
	#vpandq	$MASK,$T1,$T1		# 1
	#vpandq	$MASK,$T3,$T3		# 3
	#vporq	$PADBIT,$T4,$T4		# padbit, yes, always

	################################################################
	# while above multiplications were by r^8 in all lanes, in last
	# iteration we multiply least significant lane by r^8 and most
	# significant one by r, that's why table gets shifted...

	vpsrlq	\$32,$R0,$R0		# 0105020603070408

	################################################################
	# load either next or last 64 byte of input
	lea	($inp,$len),$inp

	#vpaddq	$H2,$T2,$H2		# accumulate input
	vpmuludq	$H2,$R1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$R2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$S3,$D0	# d0 = h2*s3
	vpandq	$MASK,$T1,$T1		# 1
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4
	vpandq	$MASK,$T3,$T3		# 3
	vpmuludq	$H2,$R0,$D2	# d2 = h2*r0
	vporq	$PADBIT,$T4,$T4		# padbit, yes, always
	vpaddq	$H1,$T1,$H1		# accumulate input

	vmovdqu	16*0($inp),%x#$T0
	vpmuludq	$H0,$R3,$M3
	vpmuludq	$H0,$R4,$M4
	vpmuludq	$H0,$R0,$M0
	vpmuludq	$H0,$R1,$M1
	vpaddq	$M3,$D3,$D3		# d3 += h0*r3
	vpaddq	$M4,$D4,$D4		# d4 += h0*r4
	vpaddq	$M0,$D0,$D0		# d0 += h0*r0
	vpaddq	$M1,$D1,$D1		# d1 += h0*r1

	vmovdqu	16*1($inp),%x#$T1
	vpmuludq	$H1,$R2,$M3
	vpmuludq	$H1,$R3,$M4
	vpmuludq	$H1,$S4,$M0
	vpmuludq	$H0,$R2,$M2
	vpaddq	$M3,$D3,$D3		# d3 += h1*r2
	vpaddq	$M4,$D4,$D4		# d4 += h1*r3
	vpaddq	$M0,$D0,$D0		# d0 += h1*s4
	vpaddq	$M2,$D2,$D2		# d2 += h0*r2

	vinserti128	\$1,16*2($inp),%y#$T0,%y#$T0
	vpmuludq	$H3,$R0,$M3
	vpmuludq	$H3,$R1,$M4
	vpmuludq	$H1,$R0,$M1
	vpmuludq	$H1,$R1,$M2
	vpaddq	$M3,$D3,$D3		# d3 += h3*r0
	vpaddq	$M4,$D4,$D4		# d4 += h3*r1
	vpaddq	$M1,$D1,$D1		# d1 += h1*r0
	vpaddq	$M2,$D2,$D2		# d2 += h1*r1

	vinserti128	\$1,16*3($inp),%y#$T1,%y#$T1
	vpmuludq	$H4,$S4,$M3
	vpmuludq	$H4,$R0,$M4
	vpmuludq	$H3,$S2,$M0
	vpmuludq	$H3,$S3,$M1
	vpmuludq	$H3,$S4,$M2
	vpaddq	$M3,$D3,$H3		# h3 = d3 + h4*s4
	vpaddq	$M4,$D4,$D4		# d4 += h4*r0
	vpaddq	$M0,$D0,$D0		# d0 += h3*s2
	vpaddq	$M1,$D1,$D1		# d1 += h3*s3
	vpaddq	$M2,$D2,$D2		# d2 += h3*s4

	vpmuludq	$H4,$S1,$M0
	vpmuludq	$H4,$S2,$M1
	vpmuludq	$H4,$S3,$M2
	vpaddq	$M0,$D0,$H0		# h0 = d0 + h4*s1
	vpaddq	$M1,$D1,$H1		# h1 = d1 + h4*s2
	vpaddq	$M2,$D2,$H2		# h2 = d2 + h4*s3

	################################################################
	# horizontal addition

	vpermq	\$0xb1,$H3,$D3
	vpermq	\$0xb1,$D4,$H4
	vpermq	\$0xb1,$H0,$D0
	vpermq	\$0xb1,$H1,$D1
	vpermq	\$0xb1,$H2,$D2

	vpermq	\$0x2,$H3,$D3
	vpermq	\$0x2,$H4,$D4
	vpermq	\$0x2,$H0,$D0
	vpermq	\$0x2,$H1,$D1
	vpermq	\$0x2,$H2,$D2

	vextracti64x4	\$0x1,$H3,%y#$D3
	vextracti64x4	\$0x1,$H4,%y#$D4
	vextracti64x4	\$0x1,$H0,%y#$D0
	vextracti64x4	\$0x1,$H1,%y#$D1
	vextracti64x4	\$0x1,$H2,%y#$D2
	vpaddq	$D3,$H3,${H3}{%k3}{z}	# keep single qword in case
	vpaddq	$D4,$H4,${H4}{%k3}{z}	# it's passed to .Ltail_avx2
	vpaddq	$D0,$H0,${H0}{%k3}{z}
	vpaddq	$D1,$H1,${H1}{%k3}{z}
	vpaddq	$D2,$H2,${H2}{%k3}{z}

map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));

	################################################################
	# lazy reduction (interleaved with input splat)

	vpsrldq	\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpunpcklqdq	$T3,$T2,$T2	# 2:3
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpaddq	$D1,$H2,$H2		# h1 -> h2
	vpsrlq	\$40,$T4,$T4		# 4
	vpaddq	$D4,$H0,$H0		# h4 -> h0
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T0,$T0		# 0
	vpaddq	$D2,$H3,$H3		# h2 -> h3
	vpaddq	$H2,$T2,$H2		# accumulate input for .Ltail_avx2
	vpand	$MASK,$T1,$T1		# 1
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	lea	0x90(%rsp),%rax		# size optimization for .Ltail_avx2
	jnz	.Ltail_avx2$suffix

	vpsubq	$T2,$H2,$H2		# undo input accumulation
	vmovd	%x#$H0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	%x#$H1,`4*1-48-64`($ctx)
	vmovd	%x#$H2,`4*2-48-64`($ctx)
	vmovd	%x#$H3,`4*3-48-64`($ctx)
	vmovd	%x#$H4,`4*4-48-64`($ctx)

$code.=<<___ if ($win64);
	movdqa	-0xb0(%r10),%xmm6
	movdqa	-0xa0(%r10),%xmm7
	movdqa	-0x90(%r10),%xmm8
	movdqa	-0x80(%r10),%xmm9
	movdqa	-0x70(%r10),%xmm10
	movdqa	-0x60(%r10),%xmm11
	movdqa	-0x50(%r10),%xmm12
	movdqa	-0x40(%r10),%xmm13
	movdqa	-0x30(%r10),%xmm14
	movdqa	-0x20(%r10),%xmm15

.Ldo_avx512_epilogue:

$code.=<<___ if (!$win64);
.cfi_def_cfa_register	%rsp

&declare_function("poly1305_blocks_avx2", 32, 4);
poly1305_blocks_avxN(0);
&end_function("poly1305_blocks_avx2");

$code .= "#endif\n";

#######################################################################
# On entry we have input length divisible by 64. But since inner loop
# processes 128 bytes per iteration, cases when length is not divisible
# by 128 are handled by passing tail 64 bytes to .Ltail_avx2. For this
# reason stack layout is kept identical to poly1305_blocks_avx2. If not
# for this tail, we wouldn't have to even allocate stack frame...
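#
# Hedged example (exposition only): len = 192 bytes means one 128-byte
# pass through the AVX-512 loop with the remaining 64 bytes handed to
# .Ltail_avx2, which is why both entry points must agree on the frame
# layout.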
$code .= "#ifdef CONFIG_AS_AVX512\n";

&declare_function("poly1305_blocks_avx512", 32, 4);
poly1305_blocks_avxN(1);
&end_function("poly1305_blocks_avx512");

$code .= "#endif\n";

if (!$kernel && $avx>3) {
########################################################################
# VPMADD52 version using 2^44 radix.
#
# One can argue that base 2^52 would be more natural. Well, even though
# some operations would be more natural, one has to recognize a couple
# of things. Base 2^52 doesn't provide an advantage over base 2^44 if
# you look at the amount of multiply-n-accumulate operations. Secondly,
# it makes it impossible to pre-compute multiples of 5 [referred to as
# s[]/sN in reference implementations], which means that more such
# operations would have to be performed in the inner loop, which in turn
# makes the critical path longer. In other words, even though base 2^44
# reduction might look less elegant, the overall critical path is
# actually shorter...
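#
# Hedged worked example (exposition only): in radix 2^44 the 130 bits
# split as 44+44+42, so limbs carry weights 2^0, 2^44 and 2^88, and a
# cross term of weight 2^132 re-enters the low limb as
#
#	2^132 mod (2^130 - 5) = 4*5 = 20
#
# which is why the s[] values below store the key limbs multiplied by
# 20 (the "magic <<2" on top of the usual *5) rather than by 5 as in
# the 2^26 code paths.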
########################################################################
# Layout of opaque area is as follows.
#
#	unsigned __int64 h[3];		# current hash value base 2^44
#	unsigned __int64 s[2];		# key value*20 base 2^44
#	unsigned __int64 r[3];		# key value base 2^44
#	struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4];
#					# r^n positions reflect
#					# placement in register, not
#					# memory, R[3] is R[1]*20
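# Expressed as a hypothetical C struct for orientation only (the code
# below addresses these fields by byte offset):
#
#	struct base2_44_ctx {
#		uint64_t h[3];		/* offset  0: hash value      */
#		uint64_t s[2];		/* offset 24: key*20          */
#		uint64_t r[3];		/* offset 40: key             */
#		uint64_t R[4][4];	/* offset 64: key powers; a -1
#					   written at offset 64 marks
#					   R[] as not yet computed    */
#	};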
.type	poly1305_init_base2_44,\@function,3
poly1305_init_base2_44:
	mov	%rax,0($ctx)		# initialize hash value
	lea	poly1305_blocks_vpmadd52(%rip),%r10
	lea	poly1305_emit_base2_44(%rip),%r11
	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx
	mov	\$0x00000fffffffffff,%r8
	mov	\$0x00000fffffffffff,%r9
	mov	%r8,40($ctx)		# r0
	mov	%rax,48($ctx)		# r1
	lea	(%rax,%rax,4),%rax	# *5
	mov	%rcx,56($ctx)		# r2
	shl	\$2,%rax		# magic <<2
	lea	(%rcx,%rcx,4),%rcx	# *5
	shl	\$2,%rcx		# magic <<2
	mov	%rax,24($ctx)		# s1
	mov	%rcx,32($ctx)		# s2
	movq	\$-1,64($ctx)		# write impossible value
$code.=<<___	if ($flavour !~ /elf32/);
$code.=<<___	if ($flavour =~ /elf32/);
.size	poly1305_init_base2_44,.-poly1305_init_base2_44

my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));

.type	poly1305_blocks_vpmadd52,\@function,4
poly1305_blocks_vpmadd52:
	jz	.Lno_data_vpmadd52		# too short
	mov	64($ctx),%r8		# peek on power of the key

	# if powers of the key are not calculated yet, process up to 3
	# blocks with this single-block subroutine, otherwise ensure that
	# length is divisible by 2 blocks and pass the rest down to next
	# level...
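	# roughly (illustrative pseudo-code for the branches below):
	#
	#	if (power < 0 && len < 4)	# R[] not computed, short input
	#		stay in this 1x loop;
	#	else
	#		goto .Lblocks_vpmadd52_4x;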
	cmp	\$4,$len		# is input long
	test	%r8,%r8			# is power value impossible?
	and	$len,%rax		# is input of favourable length?
	jz	.Lblocks_vpmadd52_4x

	lea	.L2_44_inp_permd(%rip),%r10
	vmovq	$padbit,%x#$PAD
	vmovdqa64	0(%r10),$inp_permd	# .L2_44_inp_permd
	vmovdqa64	32(%r10),$inp_shift	# .L2_44_inp_shift
	vpermq	\$0xcf,$PAD,$PAD
	vmovdqa64	64(%r10),$reduc_mask	# .L2_44_mask

	vmovdqu64	0($ctx),${Dlo}{%k7}{z}		# load hash value
	vmovdqu64	40($ctx),${r2r1r0}{%k7}{z}	# load keys
	vmovdqu64	32($ctx),${r1r0s2}{%k7}{z}
	vmovdqu64	24($ctx),${r0s2s1}{%k7}{z}

	vmovdqa64	96(%r10),$reduc_rght	# .L2_44_shift_rgt
	vmovdqa64	128(%r10),$reduc_left	# .L2_44_shift_lft
	vmovdqu32	0($inp),%x#$T0	# load input as ----3210
	vpermd	$T0,$inp_permd,$T0	# ----3210 -> --322110
	vpsrlvq	$inp_shift,$T0,$T0
	vpandq	$reduc_mask,$T0,$T0

	vpaddq	$T0,$Dlo,$Dlo		# accumulate input

	vpermq	\$0,$Dlo,${H0}{%k7}{z}		# smash hash value
	vpermq	\$0b01010101,$Dlo,${H1}{%k7}{z}
	vpermq	\$0b10101010,$Dlo,${H2}{%k7}{z}

	vpxord	$Dlo,$Dlo,$Dlo
	vpxord	$Dhi,$Dhi,$Dhi

	vpmadd52luq	$r2r1r0,$H0,$Dlo
	vpmadd52huq	$r2r1r0,$H0,$Dhi

	vpmadd52luq	$r1r0s2,$H1,$Dlo
	vpmadd52huq	$r1r0s2,$H1,$Dhi

	vpmadd52luq	$r0s2s1,$H2,$Dlo
	vpmadd52huq	$r0s2s1,$H2,$Dhi

	vpsrlvq	$reduc_rght,$Dlo,$T0	# 0 in topmost qword
	vpsllvq	$reduc_left,$Dhi,$Dhi	# 0 in topmost qword
	vpandq	$reduc_mask,$Dlo,$Dlo

	vpaddq	$T0,$Dhi,$Dhi

	vpermq	\$0b10010011,$Dhi,$Dhi	# 0 in lowest qword

	vpaddq	$Dhi,$Dlo,$Dlo		# note topmost qword :-)

	vpsrlvq	$reduc_rght,$Dlo,$T0	# 0 in topmost word
	vpandq	$reduc_mask,$Dlo,$Dlo

	vpermq	\$0b10010011,$T0,$T0

	vpaddq	$T0,$Dlo,$Dlo

	vpermq	\$0b10010011,$Dlo,${T0}{%k1}{z}

	vpaddq	$T0,$Dlo,$Dlo
	vpaddq	$T0,$Dlo,$Dlo

	vmovdqu64	$Dlo,0($ctx){%k7}	# store hash value
	jnz	.Lblocks_vpmadd52_4x

.size	poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
########################################################################
# As implied by its name, the 4x subroutine processes 4 blocks in
# parallel (but also handles lengths of 4*n+2 blocks). It takes up to
# the 4th key power and operates on 256-bit %ymm registers.
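# The 4x split follows the usual parallel Horner scheme (sketch only;
# m1..m4 denote four consecutive blocks):
#
#	h = (h + m1)*r^4 + m2*r^3 + m3*r^2 + m4*r
#
# so each lane multiplies by a different key power and the lanes are
# only folded together in the final horizontal addition.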
my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));

.type	poly1305_blocks_vpmadd52_4x,\@function,4
poly1305_blocks_vpmadd52_4x:
	jz	.Lno_data_vpmadd52_4x		# too short
	mov	64($ctx),%r8		# peek on power of the key

.Lblocks_vpmadd52_4x:
	vpbroadcastq	$padbit,$PAD

	vmovdqa64	.Lx_mask44(%rip),$mask44
	vmovdqa64	.Lx_mask42(%rip),$mask42
	kmovw	%eax,%k1		# used in 2x path

	test	%r8,%r8			# is power value impossible?
	js	.Linit_vpmadd52		# if it is, then init R[4]

	vmovq	0($ctx),%x#$H0		# load current hash value
	vmovq	8($ctx),%x#$H1
	vmovq	16($ctx),%x#$H2

	test	\$3,$len		# is length 4*n+2?
	jnz	.Lblocks_vpmadd52_2x_do

.Lblocks_vpmadd52_4x_do:
	vpbroadcastq	64($ctx),$R0	# load 4th power of the key
	vpbroadcastq	96($ctx),$R1
	vpbroadcastq	128($ctx),$R2
	vpbroadcastq	160($ctx),$S1

.Lblocks_vpmadd52_4x_key_loaded:
	vpsllq	\$2,$R2,$S2		# S2 = R2*5*4

	test	\$7,$len		# is len 8*n?
	jz	.Lblocks_vpmadd52_8x

	vmovdqu64	16*0($inp),$T2	# load data
	vmovdqu64	16*2($inp),$T3

	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3

	# at this point 64-bit lanes are ordered as 3-1-2-0

	vpsrlq	\$24,$T3,$T2		# splat the data
	vpaddq	$T2,$H2,$H2		# accumulate input
	vpandq	$mask44,$T1,$T0
	vpandq	$mask44,$T1,$T1

	jz	.Ltail_vpmadd52_4x
	jmp	.Loop_vpmadd52_4x
	vmovq	24($ctx),%x#$S1		# load key
	vmovq	56($ctx),%x#$H2
	vmovq	32($ctx),%x#$S2
	vmovq	40($ctx),%x#$R0
	vmovq	48($ctx),%x#$R1

.Lmul_init_vpmadd52:
	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi
	################################################################
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi

	vpaddq	$D0hi,$D1lo,$D1lo

	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi

	vpaddq	$D1hi,$D2lo,$D2lo

	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi

	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi

	vpaddq	$D2hi,$H0,$H0

	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0

	jz	.Ldone_init_vpmadd52
	vpunpcklqdq	$R1,$H1,$R1	# 1,2
	vpbroadcastq	%x#$H1,%x#$H1	# 2,2
	vpunpcklqdq	$R2,$H2,$R2
	vpbroadcastq	%x#$H2,%x#$H2
	vpunpcklqdq	$R0,$H0,$R0
	vpbroadcastq	%x#$H0,%x#$H0

	vpsllq	\$2,$R1,$S1		# S1 = R1*5*4
	vpsllq	\$2,$R2,$S2		# S2 = R2*5*4

	jmp	.Lmul_init_vpmadd52

.Ldone_init_vpmadd52:
	vinserti128	\$1,%x#$R1,$H1,$R1	# 1,2,3,4
	vinserti128	\$1,%x#$R2,$H2,$R2
	vinserti128	\$1,%x#$R0,$H0,$R0

	vpermq	\$0b11011000,$R1,$R1	# 1,3,2,4
	vpermq	\$0b11011000,$R2,$R2
	vpermq	\$0b11011000,$R0,$R0

	vpsllq	\$2,$R1,$S1		# S1 = R1*5*4
	vmovq	0($ctx),%x#$H0		# load current hash value
	vmovq	8($ctx),%x#$H1
	vmovq	16($ctx),%x#$H2

	test	\$3,$len		# is length 4*n+2?
	jnz	.Ldone_init_vpmadd52_2x

	vmovdqu64	$R0,64($ctx)	# save key powers
	vpbroadcastq	%x#$R0,$R0	# broadcast 4th power
	vmovdqu64	$R1,96($ctx)
	vpbroadcastq	%x#$R1,$R1
	vmovdqu64	$R2,128($ctx)
	vpbroadcastq	%x#$R2,$R2
	vmovdqu64	$S1,160($ctx)
	vpbroadcastq	%x#$S1,$S1

	jmp	.Lblocks_vpmadd52_4x_key_loaded
.Ldone_init_vpmadd52_2x:
	vmovdqu64	$R0,64($ctx)	# save key powers
	vpsrldq	\$8,$R0,$R0		# 0-1-0-2
	vmovdqu64	$R1,96($ctx)
	vmovdqu64	$R2,128($ctx)
	vmovdqu64	$S1,160($ctx)

	jmp	.Lblocks_vpmadd52_2x_key_loaded

.Lblocks_vpmadd52_2x_do:
	vmovdqu64	128+8($ctx),${R2}{%k1}{z}	# load 2nd and 1st key powers
	vmovdqu64	160+8($ctx),${S1}{%k1}{z}
	vmovdqu64	64+8($ctx),${R0}{%k1}{z}
	vmovdqu64	96+8($ctx),${R1}{%k1}{z}

.Lblocks_vpmadd52_2x_key_loaded:
	vmovdqu64	16*0($inp),$T2	# load data

	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3

	# at this point 64-bit lanes are ordered as x-1-x-0

	vpsrlq	\$24,$T3,$T2		# splat the data
	vpaddq	$T2,$H2,$H2		# accumulate input
	vpandq	$mask44,$T1,$T0
	vpandq	$mask44,$T1,$T1

	jmp	.Ltail_vpmadd52_2x
	#vpaddq	$T2,$H2,$H2		# accumulate input

	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	vmovdqu64	16*0($inp),$T2	# load data
	vmovdqu64	16*2($inp),$T3

	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3
	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi
	################################################################
	# partial reduction (interleaved with data splat)
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi

	vpaddq	$D0hi,$D1lo,$D1lo

	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi

	vpandq	$mask44,$T1,$T0

	vpaddq	$D1hi,$D2lo,$D2lo

	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi

	vpaddq	$T2,$H2,$H2		# accumulate input
	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi

	vpaddq	$D2hi,$H0,$H0

	vpandq	$mask44,$T1,$T1

	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0

	sub	\$4,$len		# len-=64
	jnz	.Loop_vpmadd52_4x
	vmovdqu64	128($ctx),$R2	# load all key powers
	vmovdqu64	160($ctx),$S1
	vmovdqu64	64($ctx),$R0
	vmovdqu64	96($ctx),$R1

	vpsllq	\$2,$R2,$S2		# S2 = R2*5*4

	#vpaddq	$T2,$H2,$H2		# accumulate input

	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi
	################################################################
	# horizontal addition
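	# Folding sketch: viewing a register as qwords [v3 v2 v1 v0],
	# vpsrldq by 8 gives [0 v3 0 v1], adding yields
	# [v3 v2+v3 v1 v0+v1], and vpermq pulls v2+v3 down so the
	# final write-masked vpaddq leaves v0+v1+v2+v3 in qword 0
	# (the other lanes are zeroed via the mask).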
	vpsrldq	\$8,$D0lo,$T0
	vpsrldq	\$8,$D0hi,$H0
	vpsrldq	\$8,$D1lo,$T1
	vpsrldq	\$8,$D1hi,$H1
	vpaddq	$T0,$D0lo,$D0lo
	vpaddq	$H0,$D0hi,$D0hi
	vpsrldq	\$8,$D2lo,$T2
	vpsrldq	\$8,$D2hi,$H2
	vpaddq	$T1,$D1lo,$D1lo
	vpaddq	$H1,$D1hi,$D1hi
	vpermq	\$0x2,$D0lo,$T0
	vpermq	\$0x2,$D0hi,$H0
	vpaddq	$T2,$D2lo,$D2lo
	vpaddq	$H2,$D2hi,$D2hi

	vpermq	\$0x2,$D1lo,$T1
	vpermq	\$0x2,$D1hi,$H1
	vpaddq	$T0,$D0lo,${D0lo}{%k1}{z}
	vpaddq	$H0,$D0hi,${D0hi}{%k1}{z}
	vpermq	\$0x2,$D2lo,$T2
	vpermq	\$0x2,$D2hi,$H2
	vpaddq	$T1,$D1lo,${D1lo}{%k1}{z}
	vpaddq	$H1,$D1hi,${D1hi}{%k1}{z}
	vpaddq	$T2,$D2lo,${D2lo}{%k1}{z}
	vpaddq	$H2,$D2hi,${D2hi}{%k1}{z}
	################################################################
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi

	vpaddq	$D0hi,$D1lo,$D1lo

	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi

	vpaddq	$D1hi,$D2lo,$D2lo

	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi

	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi

	vpaddq	$D2hi,$H0,$H0

	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0

	# at this point $len is
	# either 4*n+2 or 0...
	sub	\$2,$len		# len-=32
	ja	.Lblocks_vpmadd52_4x_do

	vmovq	%x#$H0,0($ctx)
	vmovq	%x#$H1,8($ctx)
	vmovq	%x#$H2,16($ctx)

.Lno_data_vpmadd52_4x:
.size	poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
########################################################################
# As implied by its name, the 8x subroutine processes 8 blocks in
# parallel... This is an intermediate version, as it's used only in
# cases when the input length is either 8*n, 8*n+1 or 8*n+2...
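# The per-iteration recurrence is the same Horner scheme widened to 8
# lanes (sketch; m1..m8 denote consecutive blocks):
#
#	h = (h + m1)*r^8 + m2*r^7 + ... + m8*r
#
# which is why the 5th..8th key powers are derived below before any
# data is loaded.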
my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10));

.type	poly1305_blocks_vpmadd52_8x,\@function,4
poly1305_blocks_vpmadd52_8x:
	jz	.Lno_data_vpmadd52_8x		# too short
	mov	64($ctx),%r8		# peek on power of the key

	vmovdqa64	.Lx_mask44(%rip),$mask44
	vmovdqa64	.Lx_mask42(%rip),$mask42

	test	%r8,%r8			# is power value impossible?
	js	.Linit_vpmadd52		# if it is, then init R[4]

	vmovq	0($ctx),%x#$H0		# load current hash value
	vmovq	8($ctx),%x#$H1
	vmovq	16($ctx),%x#$H2

.Lblocks_vpmadd52_8x:
	################################################################
	# first we calculate more key powers
	vmovdqu64	128($ctx),$R2	# load 1-3-2-4 powers
	vmovdqu64	160($ctx),$S1
	vmovdqu64	64($ctx),$R0
	vmovdqu64	96($ctx),$R1

	vpsllq	\$2,$R2,$S2		# S2 = R2*5*4

	vpbroadcastq	%x#$R2,$RR2	# broadcast 4th power
	vpbroadcastq	%x#$R0,$RR0
	vpbroadcastq	%x#$R1,$RR1
	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$RR2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$RR2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$RR2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$RR2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$RR2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$RR2,$R0,$D2hi

	vpmadd52luq	$RR0,$R0,$D0lo
	vpmadd52huq	$RR0,$R0,$D0hi
	vpmadd52luq	$RR0,$R1,$D1lo
	vpmadd52huq	$RR0,$R1,$D1hi
	vpmadd52luq	$RR0,$R2,$D2lo
	vpmadd52huq	$RR0,$R2,$D2hi

	vpmadd52luq	$RR1,$S2,$D0lo
	vpmadd52huq	$RR1,$S2,$D0hi
	vpmadd52luq	$RR1,$R0,$D1lo
	vpmadd52huq	$RR1,$R0,$D1hi
	vpmadd52luq	$RR1,$R1,$D2lo
	vpmadd52huq	$RR1,$R1,$D2hi
	################################################################
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$RR0
	vpaddq	$tmp,$D0hi,$D0hi

	vpaddq	$D0hi,$D1lo,$D1lo

	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$RR1
	vpaddq	$tmp,$D1hi,$D1hi

	vpaddq	$D1hi,$D2lo,$D2lo

	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$RR2
	vpaddq	$tmp,$D2hi,$D2hi

	vpaddq	$D2hi,$RR0,$RR0
	vpsllq	\$2,$D2hi,$D2hi

	vpaddq	$D2hi,$RR0,$RR0

	vpsrlq	\$44,$RR0,$tmp		# additional step
	vpandq	$mask44,$RR0,$RR0

	vpaddq	$tmp,$RR1,$RR1
	################################################################
	# At this point Rx holds 1324 powers, RRx - 5768, and the goal
	# is 15263748, which reflects how data is loaded...
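	# i.e. (sketch) vpunpck{l,h}qdq split the 1324 and 5768 sets
	# into 3748 and 1526 halves, and vshufi64x2 below glues those
	# into the 15263748 order that matches the 73625140 transpose
	# of the input lanes.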
	vpunpcklqdq	$R2,$RR2,$T2	# 3748
	vpunpckhqdq	$R2,$RR2,$R2	# 1526
	vpunpcklqdq	$R0,$RR0,$T0
	vpunpckhqdq	$R0,$RR0,$R0
	vpunpcklqdq	$R1,$RR1,$T1
	vpunpckhqdq	$R1,$RR1,$R1

######## switch to %zmm
map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2);

	vshufi64x2	\$0x44,$R2,$T2,$RR2	# 15263748
	vshufi64x2	\$0x44,$R0,$T0,$RR0
	vshufi64x2	\$0x44,$R1,$T1,$RR1

	vmovdqu64	16*0($inp),$T2	# load data
	vmovdqu64	16*4($inp),$T3
	vpsllq	\$2,$RR2,$SS2		# S2 = R2*5*4
	vpsllq	\$2,$RR1,$SS1		# S1 = R1*5*4
	vpaddq	$RR2,$SS2,$SS2
	vpaddq	$RR1,$SS1,$SS1
	vpsllq	\$2,$SS2,$SS2
	vpsllq	\$2,$SS1,$SS1

	vpbroadcastq	$padbit,$PAD
	vpbroadcastq	%x#$mask44,$mask44
	vpbroadcastq	%x#$mask42,$mask42

	vpbroadcastq	%x#$SS1,$S1	# broadcast 8th power
	vpbroadcastq	%x#$SS2,$S2
	vpbroadcastq	%x#$RR0,$R0
	vpbroadcastq	%x#$RR1,$R1
	vpbroadcastq	%x#$RR2,$R2

	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3

	# at this point 64-bit lanes are ordered as 73625140

	vpsrlq	\$24,$T3,$T2		# splat the data
	vpaddq	$T2,$H2,$H2		# accumulate input
	vpandq	$mask44,$T1,$T0
	vpandq	$mask44,$T1,$T1

	jz	.Ltail_vpmadd52_8x
	jmp	.Loop_vpmadd52_8x
	#vpaddq	$T2,$H2,$H2		# accumulate input

	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	vmovdqu64	16*0($inp),$T2	# load data
	vmovdqu64	16*4($inp),$T3

	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3
	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi
	################################################################
	# partial reduction (interleaved with data splat)
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi

	vpaddq	$D0hi,$D1lo,$D1lo

	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi

	vpandq	$mask44,$T1,$T0

	vpaddq	$D1hi,$D2lo,$D2lo

	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi

	vpaddq	$T2,$H2,$H2		# accumulate input
	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi

	vpaddq	$D2hi,$H0,$H0

	vpandq	$mask44,$T1,$T1

	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0

	sub	\$8,$len		# len-=128
	jnz	.Loop_vpmadd52_8x
	#vpaddq	$T2,$H2,$H2		# accumulate input

	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$SS1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$SS1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$SS2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$SS2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$RR0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$RR0,$D2hi

	vpmadd52luq	$H0,$RR0,$D0lo
	vpmadd52huq	$H0,$RR0,$D0hi
	vpmadd52luq	$H0,$RR1,$D1lo
	vpmadd52huq	$H0,$RR1,$D1hi
	vpmadd52luq	$H0,$RR2,$D2lo
	vpmadd52huq	$H0,$RR2,$D2hi

	vpmadd52luq	$H1,$SS2,$D0lo
	vpmadd52huq	$H1,$SS2,$D0hi
	vpmadd52luq	$H1,$RR0,$D1lo
	vpmadd52huq	$H1,$RR0,$D1hi
	vpmadd52luq	$H1,$RR1,$D2lo
	vpmadd52huq	$H1,$RR1,$D2hi

	################################################################
	# horizontal addition
	vpsrldq	\$8,$D0lo,$T0
	vpsrldq	\$8,$D0hi,$H0
	vpsrldq	\$8,$D1lo,$T1
	vpsrldq	\$8,$D1hi,$H1
	vpaddq	$T0,$D0lo,$D0lo
	vpaddq	$H0,$D0hi,$D0hi
	vpsrldq	\$8,$D2lo,$T2
	vpsrldq	\$8,$D2hi,$H2
	vpaddq	$T1,$D1lo,$D1lo
	vpaddq	$H1,$D1hi,$D1hi
	vpermq	\$0x2,$D0lo,$T0
	vpermq	\$0x2,$D0hi,$H0
	vpaddq	$T2,$D2lo,$D2lo
	vpaddq	$H2,$D2hi,$D2hi

	vpermq	\$0x2,$D1lo,$T1
	vpermq	\$0x2,$D1hi,$H1
	vpaddq	$T0,$D0lo,$D0lo
	vpaddq	$H0,$D0hi,$D0hi
	vpermq	\$0x2,$D2lo,$T2
	vpermq	\$0x2,$D2hi,$H2
	vpaddq	$T1,$D1lo,$D1lo
	vpaddq	$H1,$D1hi,$D1hi
	vextracti64x4	\$1,$D0lo,%y#$T0
	vextracti64x4	\$1,$D0hi,%y#$H0
	vpaddq	$T2,$D2lo,$D2lo
	vpaddq	$H2,$D2hi,$D2hi

	vextracti64x4	\$1,$D1lo,%y#$T1
	vextracti64x4	\$1,$D1hi,%y#$H1
	vextracti64x4	\$1,$D2lo,%y#$T2
	vextracti64x4	\$1,$D2hi,%y#$H2

######## switch back to %ymm
map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
	vpaddq	$T0,$D0lo,${D0lo}{%k1}{z}
	vpaddq	$H0,$D0hi,${D0hi}{%k1}{z}
	vpaddq	$T1,$D1lo,${D1lo}{%k1}{z}
	vpaddq	$H1,$D1hi,${D1hi}{%k1}{z}
	vpaddq	$T2,$D2lo,${D2lo}{%k1}{z}
	vpaddq	$H2,$D2hi,${D2hi}{%k1}{z}

	################################################################
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi

	vpaddq	$D0hi,$D1lo,$D1lo

	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi

	vpaddq	$D1hi,$D2lo,$D2lo

	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi

	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi

	vpaddq	$D2hi,$H0,$H0

	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0
	################################################################

	vmovq	%x#$H0,0($ctx)
	vmovq	%x#$H1,8($ctx)
	vmovq	%x#$H2,16($ctx)

.Lno_data_vpmadd52_8x:
.size	poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x

.type	poly1305_emit_base2_44,\@function,3
poly1305_emit_base2_44:
	mov	0($ctx),%r8	# load hash value
	add	\$5,%r8		# compare to modulus
	shr	\$2,%r10	# did 130-bit value overflow?
	add	0($nonce),%rax	# accumulate nonce
	mov	%rax,0($mac)	# write result
.size	poly1305_emit_base2_44,.-poly1305_emit_base2_44
{	# chacha20-poly1305 helpers
my ($out,$inp,$otp,$len)=$win64 ? ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
                                  ("%rdi","%rsi","%rdx","%rcx");  # Unix order
.globl	xor128_encrypt_n_pad
.type	xor128_encrypt_n_pad,\@abi-omnipotent
xor128_encrypt_n_pad:
	mov	$len,%r10		# put len aside
	shr	\$4,$len		# len / 16
	movdqu	($inp,$otp),%xmm0
	movdqu	%xmm0,($out,$otp)
	and	\$15,%r10		# len % 16
.size	xor128_encrypt_n_pad,.-xor128_encrypt_n_pad

.globl	xor128_decrypt_n_pad
.type	xor128_decrypt_n_pad,\@abi-omnipotent
xor128_decrypt_n_pad:
	mov	$len,%r10		# put len aside
	shr	\$4,$len		# len / 16
	movdqu	($inp,$otp),%xmm0
	movdqu	%xmm1,($out,$otp)
	and	\$15,%r10		# len % 16
	mov	($inp,$otp),%r11b
.size	xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler
.type	avx_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	208($context),%rax	# pull context->R11

	lea	512($context),%rdi	# &context.Xmm6
	.long	0xa548f3fc		# cld; rep movsq

	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	avx_handler,.-avx_handler
	.rva	.LSEH_begin_poly1305_init_x86_64
	.rva	.LSEH_end_poly1305_init_x86_64
	.rva	.LSEH_info_poly1305_init_x86_64

	.rva	.LSEH_begin_poly1305_blocks_x86_64
	.rva	.LSEH_end_poly1305_blocks_x86_64
	.rva	.LSEH_info_poly1305_blocks_x86_64

	.rva	.LSEH_begin_poly1305_emit_x86_64
	.rva	.LSEH_end_poly1305_emit_x86_64
	.rva	.LSEH_info_poly1305_emit_x86_64
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1
	.rva	.LSEH_info_poly1305_blocks_avx_2
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3

	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2
	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_poly1305_blocks_avx512
	.rva	.LSEH_end_poly1305_blocks_avx512
	.rva	.LSEH_info_poly1305_blocks_avx512
.LSEH_info_poly1305_init_x86_64:
	.rva	.LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64

.LSEH_info_poly1305_blocks_x86_64:
	.rva	.Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit_x86_64:
	.rva	.LSEH_begin_poly1305_emit_x86_64,.LSEH_begin_poly1305_emit_x86_64
$code.=<<___ if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]

.LSEH_info_poly1305_emit_avx:
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
$code.=<<___ if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]
$code.=<<___ if ($avx>2);
.LSEH_info_poly1305_blocks_avx512:
	.rva	.Ldo_avx512_body,.Ldo_avx512_epilogue		# HandlerData[]
	last if (!s/^#/\/\// and !/^$/);

foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/%r([a-z]+)#d/%e$1/g;
	s/%r([0-9]+)#d/%r$1d/g;
	s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;

	s/(^\.type.*),[0-9]+$/\1/;
	s/(^\.type.*),\@abi-omnipotent+$/\1,\@function/;