.globl sha1_block_data_order
.type sha1_block_data_order,%function
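@ void sha1_block_data_order(uint32_t *state, const void *data, size_t num)
@ r0 = five-word A..E hash state (read via ldmia, written back via stmia),
@ r1 = input bytes, r2 = number of 64-byte blocks (converted below into an
@ end-of-input pointer)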
sha1_block_data_order:
#if __ARM_MAX_ARCH__>=7
	sub	r3,pc,#8		@ sha1_block_data_order
	ldr	r12,.LOPENSSL_armcap
	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
	ldmia	r0,{r3,r4,r5,r6,r7}
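@ integer-only register allocation: r3=A r4=B r5=C r6=D r7=E, r8=K,
@ r9..r12 = message schedule words/scratch, r14 = top of the on-stack
@ X[] window (compared against sp to detect the end of a 20-round group)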
	mov	r7,r7,ror#30	@ [6]
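@ round body below: E += ROR(A,27) + F(B,C,D) + X[i] + K, where
@ ROR(A,27) == ROL(A,5); the reference algorithm's ROL(B,30) is never
@ executed on its own but folded into the ror#2 operands of subsequent
@ rounds, which is why the working values enter the loop pre-rotated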
	add	r7,r8,r7,ror#2	@ E+=K_00_19
	eor	r10,r5,r6	@ F_xx_xx
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	ldr	r9,[r1],#4	@ handles unaligned
	add	r7,r8,r7,ror#2	@ E+=K_00_19
	eor	r10,r5,r6	@ F_xx_xx
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	add	r7,r7,r9	@ E+=X[i]
	eor	r10,r10,r6,ror#2	@ F_00_19(B,C,D)
	add	r7,r7,r10	@ E+=F_00_19(B,C,D)
	add	r6,r8,r6,ror#2	@ E+=K_00_19
	eor	r10,r4,r5	@ F_xx_xx
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	ldr	r9,[r1],#4	@ handles unaligned
	add	r6,r8,r6,ror#2	@ E+=K_00_19
	eor	r10,r4,r5	@ F_xx_xx
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	add	r6,r6,r9	@ E+=X[i]
	eor	r10,r10,r5,ror#2	@ F_00_19(B,C,D)
	add	r6,r6,r10	@ E+=F_00_19(B,C,D)
	add	r5,r8,r5,ror#2	@ E+=K_00_19
	eor	r10,r3,r4	@ F_xx_xx
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	ldr	r9,[r1],#4	@ handles unaligned
	add	r5,r8,r5,ror#2	@ E+=K_00_19
	eor	r10,r3,r4	@ F_xx_xx
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	rev	r9,r9	@ byte swap
	add	r5,r5,r9	@ E+=X[i]
	eor	r10,r10,r4,ror#2	@ F_00_19(B,C,D)
	add	r5,r5,r10	@ E+=F_00_19(B,C,D)
	add	r4,r8,r4,ror#2	@ E+=K_00_19
	eor	r10,r7,r3	@ F_xx_xx
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	ldr	r9,[r1],#4	@ handles unaligned
	add	r4,r8,r4,ror#2	@ E+=K_00_19
	eor	r10,r7,r3	@ F_xx_xx
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	rev	r9,r9	@ byte swap
	add	r4,r4,r9	@ E+=X[i]
	eor	r10,r10,r3,ror#2	@ F_00_19(B,C,D)
	add	r4,r4,r10	@ E+=F_00_19(B,C,D)
	add	r3,r8,r3,ror#2	@ E+=K_00_19
	eor	r10,r6,r7	@ F_xx_xx
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	ldr	r9,[r1],#4	@ handles unaligned
	add	r3,r8,r3,ror#2	@ E+=K_00_19
	eor	r10,r6,r7	@ F_xx_xx
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	rev	r9,r9	@ byte swap
	add	r3,r3,r9	@ E+=X[i]
	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
	add	r3,r3,r10	@ E+=F_00_19(B,C,D)
	bne	.L_00_15	@ [((11+4)*5+2)*3]
	add	r7,r8,r7,ror#2	@ E+=K_00_19
	eor	r10,r5,r6	@ F_xx_xx
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	ldr	r9,[r1],#4	@ handles unaligned
	add	r7,r8,r7,ror#2	@ E+=K_00_19
	eor	r10,r5,r6	@ F_xx_xx
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	rev	r9,r9	@ byte swap
	add	r7,r7,r9	@ E+=X[i]
	eor	r10,r10,r6,ror#2	@ F_00_19(B,C,D)
	add	r7,r7,r10	@ E+=F_00_19(B,C,D)
	add	r6,r8,r6,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r4,r5	@ F_xx_xx
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	and	r10,r3,r10,ror#2	@ F_xx_xx
	add	r6,r6,r9	@ E+=X[i]
	eor	r10,r10,r5,ror#2	@ F_00_19(B,C,D)
	add	r6,r6,r10	@ E+=F_00_19(B,C,D)
	add	r5,r8,r5,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r3,r4	@ F_xx_xx
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	and	r10,r7,r10,ror#2	@ F_xx_xx
	add	r5,r5,r9	@ E+=X[i]
	eor	r10,r10,r4,ror#2	@ F_00_19(B,C,D)
	add	r5,r5,r10	@ E+=F_00_19(B,C,D)
	add	r4,r8,r4,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r7,r3	@ F_xx_xx
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	and	r10,r6,r10,ror#2	@ F_xx_xx
	add	r4,r4,r9	@ E+=X[i]
	eor	r10,r10,r3,ror#2	@ F_00_19(B,C,D)
	add	r4,r4,r10	@ E+=F_00_19(B,C,D)
	add	r3,r8,r3,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r6,r7	@ F_xx_xx
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	and	r10,r5,r10,ror#2	@ F_xx_xx
	add	r3,r3,r9	@ E+=X[i]
	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
	add	r3,r3,r10	@ E+=F_00_19(B,C,D)
	ldr	r8,.LK_20_39	@ [+15+16*4]
	cmn	sp,#0	@ [+3], clear carry to denote 20_39
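@ rounds 20-39 and 60-79 share one code path (both use F=B^C^D); the
@ carry flag distinguishes them: cmn sp,#0 adds zero and thus always
@ clears C (20_39), while the cmp sp,#0 further down subtracts zero
@ and always sets C (60_79); teq r14,sp preserves C across iterations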
	add	r7,r8,r7,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r5,r6	@ F_xx_xx
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	eor	r10,r4,r10,ror#2	@ F_xx_xx
	add	r7,r7,r9	@ E+=X[i]
	add	r7,r7,r10	@ E+=F_20_39(B,C,D)
	add	r6,r8,r6,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r4,r5	@ F_xx_xx
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	eor	r10,r3,r10,ror#2	@ F_xx_xx
	add	r6,r6,r9	@ E+=X[i]
	add	r6,r6,r10	@ E+=F_20_39(B,C,D)
	add	r5,r8,r5,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r3,r4	@ F_xx_xx
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	eor	r10,r7,r10,ror#2	@ F_xx_xx
	add	r5,r5,r9	@ E+=X[i]
	add	r5,r5,r10	@ E+=F_20_39(B,C,D)
	add	r4,r8,r4,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r7,r3	@ F_xx_xx
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	eor	r10,r6,r10,ror#2	@ F_xx_xx
	add	r4,r4,r9	@ E+=X[i]
	add	r4,r4,r10	@ E+=F_20_39(B,C,D)
	add	r3,r8,r3,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r6,r7	@ F_xx_xx
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	eor	r10,r5,r10,ror#2	@ F_xx_xx
	add	r3,r3,r9	@ E+=X[i]
	add	r3,r3,r10	@ E+=F_20_39(B,C,D)
	teq	r14,sp	@ preserve carry
	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
	bcs	.L_done	@ [+((12+3)*5+2)*4], spare 300 bytes
	sub	sp,sp,#20*4	@ [+2]
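@ rounds 40-59: F(B,C,D) = MAJ(B,C,D) = (B&(C^D)) | (C&D); the two
@ terms are bitwise disjoint, so the OR can be realized with plain
@ adds into E (r10 and r11 below carry the two halves)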
	add	r7,r8,r7,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r5,r6	@ F_xx_xx
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	and	r10,r4,r10,ror#2	@ F_xx_xx
	and	r11,r5,r6	@ F_xx_xx
	add	r7,r7,r9	@ E+=X[i]
	add	r7,r7,r10	@ E+=F_40_59(B,C,D)
	add	r6,r8,r6,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r4,r5	@ F_xx_xx
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	and	r10,r3,r10,ror#2	@ F_xx_xx
	and	r11,r4,r5	@ F_xx_xx
	add	r6,r6,r9	@ E+=X[i]
	add	r6,r6,r10	@ E+=F_40_59(B,C,D)
	add	r5,r8,r5,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r3,r4	@ F_xx_xx
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	and	r10,r7,r10,ror#2	@ F_xx_xx
	and	r11,r3,r4	@ F_xx_xx
	add	r5,r5,r9	@ E+=X[i]
	add	r5,r5,r10	@ E+=F_40_59(B,C,D)
	add	r4,r8,r4,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r7,r3	@ F_xx_xx
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	and	r10,r6,r10,ror#2	@ F_xx_xx
	and	r11,r7,r3	@ F_xx_xx
	add	r4,r4,r9	@ E+=X[i]
	add	r4,r4,r10	@ E+=F_40_59(B,C,D)
	add	r3,r8,r3,ror#2	@ E+=K_xx_xx
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r6,r7	@ F_xx_xx
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	and	r10,r5,r10,ror#2	@ F_xx_xx
	and	r11,r6,r7	@ F_xx_xx
	add	r3,r3,r9	@ E+=X[i]
	add	r3,r3,r10	@ E+=F_40_59(B,C,D)
	bne	.L_40_59	@ [+((12+5)*5+2)*4]
	cmp	sp,#0	@ set carry to denote 60_79
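@ (subtracting zero can never borrow, so cmp unconditionally sets C here)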
	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
	add	sp,sp,#80*4	@ "deallocate" stack frame
	ldmia	r0,{r8,r9,r10,r11,r12}
	stmia	r0,{r3,r4,r5,r6,r7}
	bne	.Lloop	@ [+18], total 1307
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
	moveq	pc,lr	@ be binary compatible with V4, yet
	.word	0xe12fff1e	@ interoperable with Thumb ISA:-)
.size sha1_block_data_order,.-sha1_block_data_order
.LK_00_19:.word 0x5a827999
.LK_20_39:.word 0x6ed9eba1
.LK_40_59:.word 0x8f1bbcdc
.LK_60_79:.word 0xca62c1d6
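@ the four SHA-1 round constants from FIPS 180-1, i.e. floor(2^30*sqrt(n))
@ for n = 2, 3, 5, 10 respectively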
#if __ARM_MAX_ARCH__>=7
.word OPENSSL_armcap_P-sha1_block_data_order
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
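@ the .byte string above decodes to the NUL-terminated ASCII text:
@ "SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"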
#if __ARM_MAX_ARCH__>=7
.type sha1_block_data_order_neon,%function
sha1_block_data_order_neon:
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
	@ dmb	@ errata #451034 on early Cortex A8
	@ vstmdb	sp!,{d8-d15}	@ ABI specification says so
	sub	sp,sp,#64	@ alloca
	bic	sp,sp,#15	@ align for 128-bit stores
	ldmia	r0,{r3,r4,r5,r6,r7}	@ load context
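@ division of labour: NEON computes the message schedule four words at
@ a time and stores X[i]+K to the 16-byte-aligned stack area (the
@ vst1.32 {...},[r12,:128]! stores below), while the scalar ALU runs
@ the rounds and fetches those values back with ldr (hence the noted
@ "big RAW stall" on the first read)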
	vld1.8	{q0,q1},[r1]!	@ handles unaligned
	vld1.32	{d28[],d29[]},[r8,:32]!	@ load K_00_19
	vrev32.8	q0,q0	@ yes, even on
	vrev32.8	q1,q1	@ big-endian...
	vst1.32	{q8},[r12,:128]!
	vst1.32	{q9},[r12,:128]!
	vst1.32	{q10},[r12,:128]!
	ldr	r9,[sp]	@ big RAW stall
	vst1.32	{q13},[r12,:128]!
	vext.8	q13,q15,q12,#4
	vld1.32	{d28[],d29[]},[r8,:32]!
	vst1.32	{q13},[r12,:128]!
	vext.8	q13,q15,q12,#4
	vst1.32	{q13},[r12,:128]!
	vext.8	q13,q15,q12,#4
	vext.8	q12,q10,q15,#4
	vst1.32	{q13},[r12,:128]!
	vext.8	q13,q15,q12,#4
	vext.8	q12,q10,q11,#8
	vst1.32	{q13},[r12,:128]!
	vst1.32	{q13},[r12,:128]!
	vld1.32	{d28[],d29[]},[r8,:32]!
	vst1.32	{q13},[r12,:128]!
	vst1.32	{q13},[r12,:128]!
	vst1.32	{q13},[r12,:128]!
	vst1.32	{q13},[r12,:128]!
	vst1.32	{q13},[r12,:128]!
	vld1.32	{d28[],d29[]},[r8,:32]!
	vst1.32	{q13},[r12,:128]!
	vext.8	q12,q10,q11,#8
	vadd.i32	q13,q11,q14
	vst1.32	{q13},[r12,:128]!
	vext.8	q12,q11,q0,#8
	vst1.32	{q13},[r12,:128]!
	vst1.32	{q13},[r12,:128]!
	vst1.32	{q13},[r12,:128]!
	vst1.32	{q13},[r12,:128]!
	vld1.8	{q0,q1},[r1]!
	vld1.8	{q2,q3},[r1]!
	vld1.32	{d28[],d29[]},[r8,:32]!
	vst1.32	{q8},[r12,:128]!
	vst1.32	{q9},[r12,:128]!
	vst1.32	{q10},[r12,:128]!
	ldmia	r0,{r9,r10,r11,r12}	@ accumulate context
	stmia	r0,{r3,r4,r5,r6,r7}
	@ vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha1_block_data_order_neon,.-sha1_block_data_order_neon
#if __ARM_MAX_ARCH__>=7
.type sha1_block_data_order_armv8,%function
sha1_block_data_order_armv8:
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}	@ ABI specification says so
	vld1.32	{d2[0]},[r0]
	vld1.32	{d16[],d17[]},[r3,:32]!
	vld1.32	{d18[],d19[]},[r3,:32]!
	vld1.32	{d20[],d21[]},[r3,:32]!
	vld1.32	{d22[],d23[]},[r3,:32]
	vld1.8	{q4,q5},[r1]!
	vld1.8	{q6,q7},[r1]!
	vmov	q14,q0	@ offload
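@ the .byte sequences below are hand-assembled encodings of the ARMv8
@ SHA-1 crypto-extension instructions named in the trailing comments
@ (sha1h/sha1c/sha1p/sha1m/sha1su0/sha1su1), emitted as raw bytes so
@ the file assembles even with toolchains that predate these opcodes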
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 0
	.byte	0x68,0x0c,0x02,0xf2	@ sha1c q0,q1,q12
	.byte	0x4c,0x8c,0x3a,0xf2	@ sha1su0 q4,q5,q6
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 1
	.byte	0x6a,0x0c,0x06,0xf2	@ sha1c q0,q3,q13
	.byte	0x8e,0x83,0xba,0xf3	@ sha1su1 q4,q7
	.byte	0x4e,0xac,0x3c,0xf2	@ sha1su0 q5,q6,q7
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 2
	.byte	0x68,0x0c,0x04,0xf2	@ sha1c q0,q2,q12
	.byte	0x88,0xa3,0xba,0xf3	@ sha1su1 q5,q4
	.byte	0x48,0xcc,0x3e,0xf2	@ sha1su0 q6,q7,q4
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 3
	.byte	0x6a,0x0c,0x06,0xf2	@ sha1c q0,q3,q13
	.byte	0x8a,0xc3,0xba,0xf3	@ sha1su1 q6,q5
	.byte	0x4a,0xec,0x38,0xf2	@ sha1su0 q7,q4,q5
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 4
	.byte	0x68,0x0c,0x04,0xf2	@ sha1c q0,q2,q12
	.byte	0x8c,0xe3,0xba,0xf3	@ sha1su1 q7,q6
	.byte	0x4c,0x8c,0x3a,0xf2	@ sha1su0 q4,q5,q6
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 5
	.byte	0x6a,0x0c,0x16,0xf2	@ sha1p q0,q3,q13
	.byte	0x8e,0x83,0xba,0xf3	@ sha1su1 q4,q7
	.byte	0x4e,0xac,0x3c,0xf2	@ sha1su0 q5,q6,q7
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 6
	.byte	0x68,0x0c,0x14,0xf2	@ sha1p q0,q2,q12
	.byte	0x88,0xa3,0xba,0xf3	@ sha1su1 q5,q4
	.byte	0x48,0xcc,0x3e,0xf2	@ sha1su0 q6,q7,q4
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 7
	.byte	0x6a,0x0c,0x16,0xf2	@ sha1p q0,q3,q13
	.byte	0x8a,0xc3,0xba,0xf3	@ sha1su1 q6,q5
	.byte	0x4a,0xec,0x38,0xf2	@ sha1su0 q7,q4,q5
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 8
	.byte	0x68,0x0c,0x14,0xf2	@ sha1p q0,q2,q12
	.byte	0x8c,0xe3,0xba,0xf3	@ sha1su1 q7,q6
	.byte	0x4c,0x8c,0x3a,0xf2	@ sha1su0 q4,q5,q6
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 9
	.byte	0x6a,0x0c,0x16,0xf2	@ sha1p q0,q3,q13
	.byte	0x8e,0x83,0xba,0xf3	@ sha1su1 q4,q7
	.byte	0x4e,0xac,0x3c,0xf2	@ sha1su0 q5,q6,q7
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 10
	.byte	0x68,0x0c,0x24,0xf2	@ sha1m q0,q2,q12
	.byte	0x88,0xa3,0xba,0xf3	@ sha1su1 q5,q4
	.byte	0x48,0xcc,0x3e,0xf2	@ sha1su0 q6,q7,q4
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 11
	.byte	0x6a,0x0c,0x26,0xf2	@ sha1m q0,q3,q13
	.byte	0x8a,0xc3,0xba,0xf3	@ sha1su1 q6,q5
	.byte	0x4a,0xec,0x38,0xf2	@ sha1su0 q7,q4,q5
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 12
	.byte	0x68,0x0c,0x24,0xf2	@ sha1m q0,q2,q12
	.byte	0x8c,0xe3,0xba,0xf3	@ sha1su1 q7,q6
	.byte	0x4c,0x8c,0x3a,0xf2	@ sha1su0 q4,q5,q6
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 13
	.byte	0x6a,0x0c,0x26,0xf2	@ sha1m q0,q3,q13
	.byte	0x8e,0x83,0xba,0xf3	@ sha1su1 q4,q7
	.byte	0x4e,0xac,0x3c,0xf2	@ sha1su0 q5,q6,q7
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 14
	.byte	0x68,0x0c,0x24,0xf2	@ sha1m q0,q2,q12
	.byte	0x88,0xa3,0xba,0xf3	@ sha1su1 q5,q4
	.byte	0x48,0xcc,0x3e,0xf2	@ sha1su0 q6,q7,q4
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 15
	.byte	0x6a,0x0c,0x16,0xf2	@ sha1p q0,q3,q13
	.byte	0x8a,0xc3,0xba,0xf3	@ sha1su1 q6,q5
	.byte	0x4a,0xec,0x38,0xf2	@ sha1su0 q7,q4,q5
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 16
	.byte	0x68,0x0c,0x14,0xf2	@ sha1p q0,q2,q12
	.byte	0x8c,0xe3,0xba,0xf3	@ sha1su1 q7,q6
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 17
	.byte	0x6a,0x0c,0x16,0xf2	@ sha1p q0,q3,q13
	.byte	0xc0,0x62,0xb9,0xf3	@ sha1h q3,q0		@ 18
	.byte	0x68,0x0c,0x14,0xf2	@ sha1p q0,q2,q12
	.byte	0xc0,0x42,0xb9,0xf3	@ sha1h q2,q0		@ 19
	.byte	0x6a,0x0c,0x16,0xf2	@ sha1p q0,q3,q13
	vst1.32	{d2[0]},[r0]
	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
#if __ARM_MAX_ARCH__>=7
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P