# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# sha1_block procedure for ARMv4.
#
# January 2007.
#
# Size/performance trade-off
# ====================================================================
# impl		size in bytes	comp cycles[*]	measured performance
# ====================================================================
# thumb		304		3212		4420
# armv4-small	392/+29%	1958/+64%	2250/+96%
# armv4-compact	740/+89%	1552/+26%	1840/+22%
# armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
# full unroll	~5100/+260%	~1260/+4%	~1300/+5%
# ====================================================================
# thumb		= same as 'small' but in Thumb instructions[**] and
#		  with recurring code in two private functions;
# small		= detached Xload/update, loops are folded;
# compact	= detached Xload/update, 5x unroll;
# large		= interleaved Xload/update, 5x unroll;
# full unroll	= interleaved Xload/update, full unroll, estimated[!];
#
# [*]	Manually counted instructions in "grand" loop body. Measured
#	performance is affected by prologue and epilogue overhead,
#	i-cache availability, branch penalties, etc.
# [**]	While each Thumb instruction is half the size, Thumb
#	instructions are not as diverse as ARM ones: e.g., there are
#	only two arithmetic instructions with 3 arguments, no [fixed]
#	rotate, and addressing modes are limited. As a result it takes
#	more instructions to do the same job in Thumb, so the code is
#	never half the size and is always slower.
# [***]	which is also ~35% better than compiler generated code. Dual-
#	issue Cortex A8 core was measured to process input block in
#	~990 cycles.

# August 2010.
#
# Rescheduling for dual-issue pipeline resulted in 13% improvement on
# Cortex A8 core and in absolute terms ~870 cycles per input block
# [or 13.6 cycles per byte].

# February 2011.
#
# Profiler-assisted and platform-specific optimization resulted in 10%
# improvement on Cortex A8 core and 12.2 cycles per byte.

while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
sub Xupdate {
my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
$code.=<<___;
	ldr	$t0,[$Xi,#15*4]
	ldr	$t1,[$Xi,#13*4]
	ldr	$t2,[$Xi,#7*4]
	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
	ldr	$t3,[$Xi,#2*4]
	eor	$t0,$t0,$t1
	eor	$t2,$t2,$t3			@ 1 cycle stall
	eor	$t1,$c,$d			@ F_xx_xx
	mov	$t0,$t0,ror#31
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	eor	$t0,$t0,$t2,ror#31
	str	$t0,[$Xi,#-4]!
	$opt1					@ F_xx_xx
	$opt2					@ F_xx_xx
	add	$e,$e,$t0			@ E+=X[i]
___
}
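# For reference: Xupdate implements the SHA-1 message schedule
# recurrence X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16],1) over the
# 16-word circular buffer kept below $Xi (the ROL by 1 appears as
# ror#31), while $opt1/$opt2 let callers interleave the round-specific
# boolean function with the schedule to hide load-use latency.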
sub BODY_00_15 {
my ($a,$b,$c,$d,$e)=@_;
$code.=<<___;
#if __ARM_ARCH__<7
	ldrb	$t1,[$inp,#2]
	ldrb	$t0,[$inp,#3]
	ldrb	$t2,[$inp,#1]
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	ldrb	$t3,[$inp],#4
	orr	$t0,$t0,$t1,lsl#8
	eor	$t1,$c,$d			@ F_xx_xx
	orr	$t0,$t0,$t2,lsl#16
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	orr	$t0,$t0,$t3,lsl#24
#else
	ldr	$t0,[$inp],#4			@ handles unaligned
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	eor	$t1,$c,$d			@ F_xx_xx
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	$t0,$t0				@ byte swap
#endif
#endif
	and	$t1,$b,$t1,ror#2
	add	$e,$e,$t0			@ E+=X[i]
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	str	$t0,[$Xi,#-4]!
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
___
}
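# For reference: pre-ARMv7 cores do not support unaligned ldr, so that
# path assembles the big-endian input word from four ldrb loads; ARMv7
# permits a single, possibly unaligned, ldr followed by a rev byte swap
# on little-endian targets.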
sub BODY_16_19 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"and $t1,$b,$t1,ror#2");
$code.=<<___;
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
___
}
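# For reference: F_00_19(B,C,D) = (B&C)|(~B&D) is computed via the
# equivalent bitwise select D^(B&(C^D)): Xupdate leaves C^D in $t1,
# the injected "and" folds in B, and the trailing "eor" with D (both
# rotated by 2, matching the lazily-rotated register convention)
# completes the selection.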
sub BODY_20_39 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"eor $t1,$b,$t1,ror#2");
$code.=<<___;
	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
___
}
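# For reference: F_20_39(B,C,D) = B^C^D, so a single injected "eor" on
# top of the C^D already produced inside Xupdate suffices. Rounds
# 60..79 use the same function with a different constant, which is why
# one loop body below serves both ranges.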
sub BODY_40_59 {
my ($a,$b,$c,$d,$e)=@_;
	&Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
$code.=<<___;
	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
	add	$e,$e,$t2			@ E+=(C&D)
___
}
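# For reference: the majority function F_40_59(B,C,D) =
# (B&C)|(B&D)|(C&D) is evaluated as (B&(C^D))+(C&D). The two terms are
# bitwise disjoint (C^D and C&D cannot both be set in the same bit), so
# the two adds into E above are equivalent to OR-ing the terms first.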
$code=<<___;
#include "arm_arch.h"

.text

.global	sha1_block_data_order
.type	sha1_block_data_order,%function

.align	2
sha1_block_data_order:
	stmdb	sp!,{r4-r12,lr}
	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
	ldmia	$ctx,{$a,$b,$c,$d,$e}
.Lloop:
	ldr	$K,.LK_00_19
	mov	$Xi,sp
	sub	sp,sp,#15*4
	mov	$c,$c,ror#30
	mov	$d,$d,ror#30
	mov	$e,$e,ror#30		@ [6]
.L_00_15:
___
for($i=0;$i<5;$i++) {
	&BODY_00_15(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
	teq	$Xi,sp
	bne	.L_00_15		@ [((11+4)*5+2)*3]
	sub	sp,sp,#25*4
___
181 &BODY_00_15
(@V); unshift(@V,pop(@V));
182 &BODY_16_19
(@V); unshift(@V,pop(@V));
183 &BODY_16_19
(@V); unshift(@V,pop(@V));
184 &BODY_16_19
(@V); unshift(@V,pop(@V));
185 &BODY_16_19
(@V); unshift(@V,pop(@V));
$code.=<<___;

	ldr	$K,.LK_20_39		@ [+15+16*4]
	cmn	sp,#0			@ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
___
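# Rounds 20..39 and 60..79 share the .L_20_39_or_60_79 body and are
# told apart by the carry flag: "cmn sp,#0" computes sp+0, which cannot
# produce a carry-out, clearing C here, while "cmp sp,#0" before the
# second entry computes sp-0 with no borrow, setting C. The "teq" used
# as the loop-end test leaves C untouched.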
for($i=0;$i<5;$i++) {
	&BODY_20_39(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
	teq	$Xi,sp			@ preserve carry
	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes

	ldr	$K,.LK_40_59
	sub	sp,sp,#20*4		@ [+2]
.L_40_59:
___
for($i=0;$i<5;$i++) {
	&BODY_40_59(@V);	unshift(@V,pop(@V));
}
$code.=<<___;
	teq	$Xi,sp
	bne	.L_40_59		@ [+((12+5)*5+2)*4]

	ldr	$K,.LK_60_79
	sub	sp,sp,#20*4
	cmp	sp,#0			@ set carry to denote 60_79
	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
.L_done:
	add	sp,sp,#80*4		@ "deallocate" stack frame
	ldmia	$ctx,{$K,$t0,$t1,$t2,$t3}
	add	$a,$K,$a
	add	$b,$t0,$b
	add	$c,$t1,$c,ror#2
	add	$d,$t2,$d,ror#2
	add	$e,$t3,$e,ror#2
	stmia	$ctx,{$a,$b,$c,$d,$e}
	teq	$inp,$len
	bne	.Lloop			@ [+18], total 1307
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
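@ On ARMv5 and later, popping straight into pc performs an
@ interworking return. The pre-v5 epilogue instead tests bit 0 of lr:
@ "mov pc,lr" keeps plain ARMv4 callers working, while "bx lr" (or its
@ .word stand-in substituted at the end of the script) returns
@ correctly to Thumb callers.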
.align	2
.LK_00_19:	.word	0x5a827999
.LK_20_39:	.word	0x6ed9eba1
.LK_40_59:	.word	0x8f1bbcdc
.LK_60_79:	.word	0xca62c1d6
.size	sha1_block_data_order,.-sha1_block_data_order
.asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
print $code;
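# 0xe12fff1e is the fixed encoding of "bx lr"; emitting it as a literal
# .word lets an assembler driven with -march=armv4, which rejects the
# bx mnemonic, still produce the interworking return.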
close STDOUT;	# enforce flush