#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# sha1_block procedure for ARMv4.
#
# January 2007.
#
# Size/performance trade-off
# ====================================================================
# impl		size in bytes	comp cycles[*]	measured performance
# ====================================================================
# thumb		304		3212		4420
# armv4-small	392/+29%	1958/+64%	2250/+96%
# armv4-compact	740/+89%	1552/+26%	1840/+22%
# armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
# full unroll	~5100/+260%	~1260/+4%	~1300/+5%
# ====================================================================
# thumb		= same as 'small' but in Thumb instructions[**] and
#		  with recurring code in two private functions;
# small		= detached Xload/update, loops are folded;
# compact	= detached Xload/update, 5x unroll;
# large		= interleaved Xload/update, 5x unroll;
# full unroll	= interleaved Xload/update, full unroll, estimated[!];
#
# [*]	Manually counted instructions in "grand" loop body. Measured
#	performance is affected by prologue and epilogue overhead,
#	i-cache availability, branch penalties, etc.
# [**]	While each Thumb instruction is half the size of an ARM one,
#	they are not as diverse: e.g., there are only two arithmetic
#	instructions with 3 arguments, no [fixed] rotate, addressing
#	modes are limited. As a result it takes more instructions to do
#	the same job in Thumb, therefore the code is never twice as
#	small and always slower.
# [***]	which is also ~35% better than compiler generated code. Dual-
#	issue Cortex A8 core was measured to process input block in
#	~990 cycles.
#
# August 2010.
#
# Rescheduling for dual-issue pipeline resulted in 13% improvement on
# Cortex A8 core and in absolute terms ~870 cycles per input block
# [or 13.6 cycles per byte].
#
# February 2011.
#
# Profiler-assisted and platform-specific optimization resulted in 10%
# improvement on Cortex A8 core and 12.2 cycles per byte.
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
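# The loop above scans the argument list for the first token that looks
# like a file name and redirects STDOUT to it, so the script would
# typically be run along the lines of (assumed invocation, output name
# is only an example):
#
#	perl sha1-armv4-large.pl sha1-armv4-large.S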
$ctx="r0";
$inp="r1";
$len="r2";
$a="r3";
$b="r4";
$c="r5";
$d="r6";
$e="r7";
$K="r8";
$t0="r9";
$t1="r10";
$t2="r11";
$t3="r12";
$Xi="r14";
@V=($a,$b,$c,$d,$e);
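# Editorial note (not part of the original commentary): $ctx, $inp and
# $len carry the routine's three arguments in r0-r2 per the AAPCS, @V
# holds the five working variables A-E, and unshift(@V,pop(@V)) below
# rotates their roles once per round.  As far as I can tell, $c, $d and
# $e are kept pre-rotated by 30 bits (see the "mov ...,ror#30" in the
# .Lloop prologue), so SHA-1's per-round ROL(B,30) comes for free and
# the true values are recovered with ",ror#2" operands.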
sub Xupdate {
my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
$code.=<<___;
	ldr	$t0,[$Xi,#15*4]
	ldr	$t1,[$Xi,#13*4]
	ldr	$t2,[$Xi,#7*4]
	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
	ldr	$t3,[$Xi,#2*4]
	eor	$t0,$t0,$t1
	eor	$t2,$t2,$t3			@ 1 cycle stall
	eor	$t1,$c,$d			@ F_xx_xx
	mov	$t0,$t0,ror#31
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	eor	$t0,$t0,$t2,ror#31
	str	$t0,[$Xi,#-4]!
	$opt1					@ F_xx_xx
	$opt2					@ F_xx_xx
	add	$e,$e,$t0			@ E+=X[i]
___
}
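# For reference, Xupdate above realizes the SHA-1 message schedule
# X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16],1) over a 16-word window
# that lives on the stack and is walked downwards through $Xi (note
# that "ror#31" is the same thing as a rotate left by 1).  A plain
# Perl sketch of that recurrence, kept only as an editorial aid and
# never called by the generator:

sub rol32 { my ($x,$n)=@_; (($x<<$n)|($x>>(32-$n))) & 0xffffffff; }
sub Xupdate_ref {		# $X is a ref to a 16-word ring buffer
	my ($X,$i)=@_;
	my $x = $X->[($i+13)&15] ^ $X->[($i+8)&15] ^
		$X->[($i+2)&15]  ^ $X->[$i&15];
	$X->[$i&15] = rol32($x,1);
}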
sub BODY_00_15 {
my ($a,$b,$c,$d,$e)=@_;
$code.=<<___;
#if __ARM_ARCH__<7
	ldrb	$t1,[$inp,#2]
	ldrb	$t0,[$inp,#3]
	ldrb	$t2,[$inp,#1]
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	ldrb	$t3,[$inp],#4
	orr	$t0,$t0,$t1,lsl#8
	eor	$t1,$c,$d			@ F_xx_xx
	orr	$t0,$t0,$t2,lsl#16
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	orr	$t0,$t0,$t3,lsl#24
#else
	ldr	$t0,[$inp],#4			@ handles unaligned
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	eor	$t1,$c,$d			@ F_xx_xx
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	$t0,$t0				@ byte swap
#endif
#endif
	and	$t1,$b,$t1,ror#2
	add	$e,$e,$t0			@ E+=X[i]
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	str	$t0,[$Xi,#-4]!
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
___
}
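# The eor/and/eor sequence above is the usual branch-free form of the
# SHA-1 "choose" function: F_00_19(B,C,D) = (B&C)|(~B&D) = D^(B&(C^D)).
# A reference version in Perl (editorial aid, not used by the
# generator):

sub F_00_19_ref { my ($b,$c,$d)=@_; return $d ^ ($b & ($c ^ $d)); }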
sub BODY_16_19 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"and $t1,$b,$t1,ror#2");
$code.=<<___;
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
___
}
sub BODY_20_39 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"eor $t1,$b,$t1,ror#2");
$code.=<<___;
	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
___
}
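# Rounds 20-39 (and 60-79) use the parity function; Xupdate already
# leaves C^D in $t1, so a single eor with B completes it.  Reference
# form (editorial aid, unused):

sub F_20_39_ref { my ($b,$c,$d)=@_; return $b ^ $c ^ $d; }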
sub BODY_40_59 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
$code.=<<___;
	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
	add	$e,$e,$t2,ror#2
___
}
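# Rounds 40-59 use the majority function F_40_59(B,C,D) =
# (B&C)|(B&D)|(C&D).  It is accumulated into E as two separate adds
# because the two halves, B&(C^D) and C&D, can never have a bit set in
# the same position (the first needs C!=D, the second C==D==1), so
# their sum equals their OR.  Reference form (editorial aid, unused):

sub F_40_59_ref {
	my ($b,$c,$d)=@_;
	return ($b & ($c ^ $d)) + ($c & $d);	# bit-disjoint, '+' == '|'
}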
$code=<<___;
#include "arm_arch.h"

.text

.global	sha1_block_data_order
.type	sha1_block_data_order,%function

.align	2
sha1_block_data_order:
	stmdb	sp!,{r4-r12,lr}
	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
	ldmia	$ctx,{$a,$b,$c,$d,$e}
.Lloop:
	ldr	$K,.LK_00_19
	mov	$Xi,sp
	sub	sp,sp,#15*4
	mov	$c,$c,ror#30
	mov	$d,$d,ror#30
	mov	$e,$e,ror#30		@ [6]
.L_00_15:
___
for($i=0;$i<5;$i++) {
	&BODY_00_15(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
	teq	$Xi,sp
	bne	.L_00_15		@ [((11+4)*5+2)*3]
___
	&BODY_00_15(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
$code.=<<___;

	ldr	$K,.LK_20_39		@ [+15+16*4]
	sub	sp,sp,#25*4
	cmn	sp,#0			@ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
___
for($i=0;$i<5;$i++) {
	&BODY_20_39(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
	teq	$Xi,sp			@ preserve carry
	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes

	ldr	$K,.LK_40_59
	sub	sp,sp,#20*4		@ [+2]
.L_40_59:
___
for($i=0;$i<5;$i++) {
	&BODY_40_59(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
	teq	$Xi,sp
	bne	.L_40_59		@ [+((12+5)*5+2)*4]

	ldr	$K,.LK_60_79
	sub	sp,sp,#20*4
	cmp	sp,#0			@ set carry to denote 60_79
	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
.L_done:
	add	sp,sp,#80*4		@ "deallocate" stack frame
	ldmia	$ctx,{$K,$t0,$t1,$t2,$t3}
	add	$a,$K,$a
	add	$b,$t0,$b
	add	$c,$t1,$c,ror#2
	add	$d,$t2,$d,ror#2
	add	$e,$t3,$e,ror#2
	stmia	$ctx,{$a,$b,$c,$d,$e}
	teq	$inp,$len
	bne	.Lloop			@ [+18], total 1307

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.align	2
.LK_00_19:	.word	0x5a827999
.LK_20_39:	.word	0x6ed9eba1
.LK_40_59:	.word	0x8f1bbcdc
.LK_60_79:	.word	0xca62c1d6
.size	sha1_block_data_order,.-sha1_block_data_order
.asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
print $code;
close STDOUT;			# enforce flush
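# Editorial note on using the generated code: judging from the register
# map above and from "add $len,$inp,$len,lsl#6", the emitted routine
# takes the five-word SHA-1 state in r0, the input pointer in r1 and a
# count of 64-byte blocks in r2, i.e. something like the (assumed) C
# prototype
#
#	void sha1_block_data_order(uint32_t state[5],
#	                           const void *data, size_t num_blocks);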