@ third_party/boringssl/linux-arm/crypto/sha/sha256-armv4.S
@ (from chromium-blink-merge.git; blob 3c410107229159abc12f9214c0a5ee92db192d12)
@ SHA-256 block transform for 32-bit ARM, generated by OpenSSL's
@ sha256-armv4.pl perlasm script -- do not edit by hand.
1 #include "arm_arch.h"
3 .text
4 .code   32
6 .type   K256,%object
7 .align  5
8 K256:
9 .word   0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
10 .word   0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
11 .word   0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
12 .word   0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
13 .word   0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
14 .word   0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
15 .word   0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
16 .word   0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
17 .word   0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
18 .word   0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
19 .word   0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
20 .word   0xd192e819,0xd6990624,0xf40e3585,0x106aa070
21 .word   0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
22 .word   0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
23 .word   0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
24 .word   0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
25 .size   K256,.-K256
26 .word   0                               @ terminator
27 #if __ARM_MAX_ARCH__>=7
28 .LOPENSSL_armcap:
29 .word   OPENSSL_armcap_P-sha256_block_data_order
30 #endif
31 .align  5
33 .global sha256_block_data_order
34 .type   sha256_block_data_order,%function
35 sha256_block_data_order:
36         sub     r3,pc,#8                @ sha256_block_data_order
37         add     r2,r1,r2,lsl#6  @ len to point at the end of inp
38 #if __ARM_MAX_ARCH__>=7
39         ldr     r12,.LOPENSSL_armcap
40         ldr     r12,[r3,r12]            @ OPENSSL_armcap_P
41         tst     r12,#ARMV8_SHA256
42         bne     .LARMv8
43         tst     r12,#ARMV7_NEON
44         bne     .LNEON
45 #endif
46         stmdb   sp!,{r0,r1,r2,r4-r11,lr}
47         ldmia   r0,{r4,r5,r6,r7,r8,r9,r10,r11}
48         sub     r14,r3,#256+32  @ K256
49         sub     sp,sp,#16*4             @ alloca(X[16])
50 .Loop:
51 # if __ARM_ARCH__>=7
52         ldr     r2,[r1],#4
53 # else
54         ldrb    r2,[r1,#3]
55 # endif
56         eor     r3,r5,r6                @ magic
57         eor     r12,r12,r12
58 #if __ARM_ARCH__>=7
59         @ ldr   r2,[r1],#4                      @ 0
60 # if 0==15
61         str     r1,[sp,#17*4]                   @ make room for r1
62 # endif
63         eor     r0,r8,r8,ror#5
64         add     r4,r4,r12                       @ h+=Maj(a,b,c) from the past
65         eor     r0,r0,r8,ror#19 @ Sigma1(e)
66         rev     r2,r2
67 #else
68         @ ldrb  r2,[r1,#3]                      @ 0
69         add     r4,r4,r12                       @ h+=Maj(a,b,c) from the past
70         ldrb    r12,[r1,#2]
71         ldrb    r0,[r1,#1]
72         orr     r2,r2,r12,lsl#8
73         ldrb    r12,[r1],#4
74         orr     r2,r2,r0,lsl#16
75 # if 0==15
76         str     r1,[sp,#17*4]                   @ make room for r1
77 # endif
78         eor     r0,r8,r8,ror#5
79         orr     r2,r2,r12,lsl#24
80         eor     r0,r0,r8,ror#19 @ Sigma1(e)
81 #endif
82         ldr     r12,[r14],#4                    @ *K256++
83         add     r11,r11,r2                      @ h+=X[i]
84         str     r2,[sp,#0*4]
85         eor     r2,r9,r10
86         add     r11,r11,r0,ror#6        @ h+=Sigma1(e)
87         and     r2,r2,r8
88         add     r11,r11,r12                     @ h+=K256[i]
89         eor     r2,r2,r10                       @ Ch(e,f,g)
90         eor     r0,r4,r4,ror#11
91         add     r11,r11,r2                      @ h+=Ch(e,f,g)
92 #if 0==31
93         and     r12,r12,#0xff
94         cmp     r12,#0xf2                       @ done?
95 #endif
96 #if 0<15
97 # if __ARM_ARCH__>=7
98         ldr     r2,[r1],#4                      @ prefetch
99 # else
100         ldrb    r2,[r1,#3]
101 # endif
102         eor     r12,r4,r5                       @ a^b, b^c in next round
103 #else
104         ldr     r2,[sp,#2*4]            @ from future BODY_16_xx
105         eor     r12,r4,r5                       @ a^b, b^c in next round
106         ldr     r1,[sp,#15*4]   @ from future BODY_16_xx
107 #endif
108         eor     r0,r0,r4,ror#20 @ Sigma0(a)
109         and     r3,r3,r12                       @ (b^c)&=(a^b)
110         add     r7,r7,r11                       @ d+=h
111         eor     r3,r3,r5                        @ Maj(a,b,c)
112         add     r11,r11,r0,ror#2        @ h+=Sigma0(a)
113         @ add   r11,r11,r3                      @ h+=Maj(a,b,c)
114 #if __ARM_ARCH__>=7
115         @ ldr   r2,[r1],#4                      @ 1
116 # if 1==15
117         str     r1,[sp,#17*4]                   @ make room for r1
118 # endif
119         eor     r0,r7,r7,ror#5
120         add     r11,r11,r3                      @ h+=Maj(a,b,c) from the past
121         eor     r0,r0,r7,ror#19 @ Sigma1(e)
122         rev     r2,r2
123 #else
124         @ ldrb  r2,[r1,#3]                      @ 1
125         add     r11,r11,r3                      @ h+=Maj(a,b,c) from the past
126         ldrb    r3,[r1,#2]
127         ldrb    r0,[r1,#1]
128         orr     r2,r2,r3,lsl#8
129         ldrb    r3,[r1],#4
130         orr     r2,r2,r0,lsl#16
131 # if 1==15
132         str     r1,[sp,#17*4]                   @ make room for r1
133 # endif
134         eor     r0,r7,r7,ror#5
135         orr     r2,r2,r3,lsl#24
136         eor     r0,r0,r7,ror#19 @ Sigma1(e)
137 #endif
138         ldr     r3,[r14],#4                     @ *K256++
139         add     r10,r10,r2                      @ h+=X[i]
140         str     r2,[sp,#1*4]
141         eor     r2,r8,r9
142         add     r10,r10,r0,ror#6        @ h+=Sigma1(e)
143         and     r2,r2,r7
144         add     r10,r10,r3                      @ h+=K256[i]
145         eor     r2,r2,r9                        @ Ch(e,f,g)
146         eor     r0,r11,r11,ror#11
147         add     r10,r10,r2                      @ h+=Ch(e,f,g)
148 #if 1==31
149         and     r3,r3,#0xff
150         cmp     r3,#0xf2                        @ done?
151 #endif
152 #if 1<15
153 # if __ARM_ARCH__>=7
154         ldr     r2,[r1],#4                      @ prefetch
155 # else
156         ldrb    r2,[r1,#3]
157 # endif
158         eor     r3,r11,r4                       @ a^b, b^c in next round
159 #else
160         ldr     r2,[sp,#3*4]            @ from future BODY_16_xx
161         eor     r3,r11,r4                       @ a^b, b^c in next round
162         ldr     r1,[sp,#0*4]    @ from future BODY_16_xx
163 #endif
164         eor     r0,r0,r11,ror#20        @ Sigma0(a)
165         and     r12,r12,r3                      @ (b^c)&=(a^b)
166         add     r6,r6,r10                       @ d+=h
167         eor     r12,r12,r4                      @ Maj(a,b,c)
168         add     r10,r10,r0,ror#2        @ h+=Sigma0(a)
169         @ add   r10,r10,r12                     @ h+=Maj(a,b,c)
170 #if __ARM_ARCH__>=7
171         @ ldr   r2,[r1],#4                      @ 2
172 # if 2==15
173         str     r1,[sp,#17*4]                   @ make room for r1
174 # endif
175         eor     r0,r6,r6,ror#5
176         add     r10,r10,r12                     @ h+=Maj(a,b,c) from the past
177         eor     r0,r0,r6,ror#19 @ Sigma1(e)
178         rev     r2,r2
179 #else
180         @ ldrb  r2,[r1,#3]                      @ 2
181         add     r10,r10,r12                     @ h+=Maj(a,b,c) from the past
182         ldrb    r12,[r1,#2]
183         ldrb    r0,[r1,#1]
184         orr     r2,r2,r12,lsl#8
185         ldrb    r12,[r1],#4
186         orr     r2,r2,r0,lsl#16
187 # if 2==15
188         str     r1,[sp,#17*4]                   @ make room for r1
189 # endif
190         eor     r0,r6,r6,ror#5
191         orr     r2,r2,r12,lsl#24
192         eor     r0,r0,r6,ror#19 @ Sigma1(e)
193 #endif
194         ldr     r12,[r14],#4                    @ *K256++
195         add     r9,r9,r2                        @ h+=X[i]
196         str     r2,[sp,#2*4]
197         eor     r2,r7,r8
198         add     r9,r9,r0,ror#6  @ h+=Sigma1(e)
199         and     r2,r2,r6
200         add     r9,r9,r12                       @ h+=K256[i]
201         eor     r2,r2,r8                        @ Ch(e,f,g)
202         eor     r0,r10,r10,ror#11
203         add     r9,r9,r2                        @ h+=Ch(e,f,g)
204 #if 2==31
205         and     r12,r12,#0xff
206         cmp     r12,#0xf2                       @ done?
207 #endif
208 #if 2<15
209 # if __ARM_ARCH__>=7
210         ldr     r2,[r1],#4                      @ prefetch
211 # else
212         ldrb    r2,[r1,#3]
213 # endif
214         eor     r12,r10,r11                     @ a^b, b^c in next round
215 #else
216         ldr     r2,[sp,#4*4]            @ from future BODY_16_xx
217         eor     r12,r10,r11                     @ a^b, b^c in next round
218         ldr     r1,[sp,#1*4]    @ from future BODY_16_xx
219 #endif
220         eor     r0,r0,r10,ror#20        @ Sigma0(a)
221         and     r3,r3,r12                       @ (b^c)&=(a^b)
222         add     r5,r5,r9                        @ d+=h
223         eor     r3,r3,r11                       @ Maj(a,b,c)
224         add     r9,r9,r0,ror#2  @ h+=Sigma0(a)
225         @ add   r9,r9,r3                        @ h+=Maj(a,b,c)
226 #if __ARM_ARCH__>=7
227         @ ldr   r2,[r1],#4                      @ 3
228 # if 3==15
229         str     r1,[sp,#17*4]                   @ make room for r1
230 # endif
231         eor     r0,r5,r5,ror#5
232         add     r9,r9,r3                        @ h+=Maj(a,b,c) from the past
233         eor     r0,r0,r5,ror#19 @ Sigma1(e)
234         rev     r2,r2
235 #else
236         @ ldrb  r2,[r1,#3]                      @ 3
237         add     r9,r9,r3                        @ h+=Maj(a,b,c) from the past
238         ldrb    r3,[r1,#2]
239         ldrb    r0,[r1,#1]
240         orr     r2,r2,r3,lsl#8
241         ldrb    r3,[r1],#4
242         orr     r2,r2,r0,lsl#16
243 # if 3==15
244         str     r1,[sp,#17*4]                   @ make room for r1
245 # endif
246         eor     r0,r5,r5,ror#5
247         orr     r2,r2,r3,lsl#24
248         eor     r0,r0,r5,ror#19 @ Sigma1(e)
249 #endif
250         ldr     r3,[r14],#4                     @ *K256++
251         add     r8,r8,r2                        @ h+=X[i]
252         str     r2,[sp,#3*4]
253         eor     r2,r6,r7
254         add     r8,r8,r0,ror#6  @ h+=Sigma1(e)
255         and     r2,r2,r5
256         add     r8,r8,r3                        @ h+=K256[i]
257         eor     r2,r2,r7                        @ Ch(e,f,g)
258         eor     r0,r9,r9,ror#11
259         add     r8,r8,r2                        @ h+=Ch(e,f,g)
260 #if 3==31
261         and     r3,r3,#0xff
262         cmp     r3,#0xf2                        @ done?
263 #endif
264 #if 3<15
265 # if __ARM_ARCH__>=7
266         ldr     r2,[r1],#4                      @ prefetch
267 # else
268         ldrb    r2,[r1,#3]
269 # endif
270         eor     r3,r9,r10                       @ a^b, b^c in next round
271 #else
272         ldr     r2,[sp,#5*4]            @ from future BODY_16_xx
273         eor     r3,r9,r10                       @ a^b, b^c in next round
274         ldr     r1,[sp,#2*4]    @ from future BODY_16_xx
275 #endif
276         eor     r0,r0,r9,ror#20 @ Sigma0(a)
277         and     r12,r12,r3                      @ (b^c)&=(a^b)
278         add     r4,r4,r8                        @ d+=h
279         eor     r12,r12,r10                     @ Maj(a,b,c)
280         add     r8,r8,r0,ror#2  @ h+=Sigma0(a)
281         @ add   r8,r8,r12                       @ h+=Maj(a,b,c)
282 #if __ARM_ARCH__>=7
283         @ ldr   r2,[r1],#4                      @ 4
284 # if 4==15
285         str     r1,[sp,#17*4]                   @ make room for r1
286 # endif
287         eor     r0,r4,r4,ror#5
288         add     r8,r8,r12                       @ h+=Maj(a,b,c) from the past
289         eor     r0,r0,r4,ror#19 @ Sigma1(e)
290         rev     r2,r2
291 #else
292         @ ldrb  r2,[r1,#3]                      @ 4
293         add     r8,r8,r12                       @ h+=Maj(a,b,c) from the past
294         ldrb    r12,[r1,#2]
295         ldrb    r0,[r1,#1]
296         orr     r2,r2,r12,lsl#8
297         ldrb    r12,[r1],#4
298         orr     r2,r2,r0,lsl#16
299 # if 4==15
300         str     r1,[sp,#17*4]                   @ make room for r1
301 # endif
302         eor     r0,r4,r4,ror#5
303         orr     r2,r2,r12,lsl#24
304         eor     r0,r0,r4,ror#19 @ Sigma1(e)
305 #endif
306         ldr     r12,[r14],#4                    @ *K256++
307         add     r7,r7,r2                        @ h+=X[i]
308         str     r2,[sp,#4*4]
309         eor     r2,r5,r6
310         add     r7,r7,r0,ror#6  @ h+=Sigma1(e)
311         and     r2,r2,r4
312         add     r7,r7,r12                       @ h+=K256[i]
313         eor     r2,r2,r6                        @ Ch(e,f,g)
314         eor     r0,r8,r8,ror#11
315         add     r7,r7,r2                        @ h+=Ch(e,f,g)
316 #if 4==31
317         and     r12,r12,#0xff
318         cmp     r12,#0xf2                       @ done?
319 #endif
320 #if 4<15
321 # if __ARM_ARCH__>=7
322         ldr     r2,[r1],#4                      @ prefetch
323 # else
324         ldrb    r2,[r1,#3]
325 # endif
326         eor     r12,r8,r9                       @ a^b, b^c in next round
327 #else
328         ldr     r2,[sp,#6*4]            @ from future BODY_16_xx
329         eor     r12,r8,r9                       @ a^b, b^c in next round
330         ldr     r1,[sp,#3*4]    @ from future BODY_16_xx
331 #endif
332         eor     r0,r0,r8,ror#20 @ Sigma0(a)
333         and     r3,r3,r12                       @ (b^c)&=(a^b)
334         add     r11,r11,r7                      @ d+=h
335         eor     r3,r3,r9                        @ Maj(a,b,c)
336         add     r7,r7,r0,ror#2  @ h+=Sigma0(a)
337         @ add   r7,r7,r3                        @ h+=Maj(a,b,c)
338 #if __ARM_ARCH__>=7
339         @ ldr   r2,[r1],#4                      @ 5
340 # if 5==15
341         str     r1,[sp,#17*4]                   @ make room for r1
342 # endif
343         eor     r0,r11,r11,ror#5
344         add     r7,r7,r3                        @ h+=Maj(a,b,c) from the past
345         eor     r0,r0,r11,ror#19        @ Sigma1(e)
346         rev     r2,r2
347 #else
348         @ ldrb  r2,[r1,#3]                      @ 5
349         add     r7,r7,r3                        @ h+=Maj(a,b,c) from the past
350         ldrb    r3,[r1,#2]
351         ldrb    r0,[r1,#1]
352         orr     r2,r2,r3,lsl#8
353         ldrb    r3,[r1],#4
354         orr     r2,r2,r0,lsl#16
355 # if 5==15
356         str     r1,[sp,#17*4]                   @ make room for r1
357 # endif
358         eor     r0,r11,r11,ror#5
359         orr     r2,r2,r3,lsl#24
360         eor     r0,r0,r11,ror#19        @ Sigma1(e)
361 #endif
362         ldr     r3,[r14],#4                     @ *K256++
363         add     r6,r6,r2                        @ h+=X[i]
364         str     r2,[sp,#5*4]
365         eor     r2,r4,r5
366         add     r6,r6,r0,ror#6  @ h+=Sigma1(e)
367         and     r2,r2,r11
368         add     r6,r6,r3                        @ h+=K256[i]
369         eor     r2,r2,r5                        @ Ch(e,f,g)
370         eor     r0,r7,r7,ror#11
371         add     r6,r6,r2                        @ h+=Ch(e,f,g)
372 #if 5==31
373         and     r3,r3,#0xff
374         cmp     r3,#0xf2                        @ done?
375 #endif
376 #if 5<15
377 # if __ARM_ARCH__>=7
378         ldr     r2,[r1],#4                      @ prefetch
379 # else
380         ldrb    r2,[r1,#3]
381 # endif
382         eor     r3,r7,r8                        @ a^b, b^c in next round
383 #else
384         ldr     r2,[sp,#7*4]            @ from future BODY_16_xx
385         eor     r3,r7,r8                        @ a^b, b^c in next round
386         ldr     r1,[sp,#4*4]    @ from future BODY_16_xx
387 #endif
388         eor     r0,r0,r7,ror#20 @ Sigma0(a)
389         and     r12,r12,r3                      @ (b^c)&=(a^b)
390         add     r10,r10,r6                      @ d+=h
391         eor     r12,r12,r8                      @ Maj(a,b,c)
392         add     r6,r6,r0,ror#2  @ h+=Sigma0(a)
393         @ add   r6,r6,r12                       @ h+=Maj(a,b,c)
394 #if __ARM_ARCH__>=7
395         @ ldr   r2,[r1],#4                      @ 6
396 # if 6==15
397         str     r1,[sp,#17*4]                   @ make room for r1
398 # endif
399         eor     r0,r10,r10,ror#5
400         add     r6,r6,r12                       @ h+=Maj(a,b,c) from the past
401         eor     r0,r0,r10,ror#19        @ Sigma1(e)
402         rev     r2,r2
403 #else
404         @ ldrb  r2,[r1,#3]                      @ 6
405         add     r6,r6,r12                       @ h+=Maj(a,b,c) from the past
406         ldrb    r12,[r1,#2]
407         ldrb    r0,[r1,#1]
408         orr     r2,r2,r12,lsl#8
409         ldrb    r12,[r1],#4
410         orr     r2,r2,r0,lsl#16
411 # if 6==15
412         str     r1,[sp,#17*4]                   @ make room for r1
413 # endif
414         eor     r0,r10,r10,ror#5
415         orr     r2,r2,r12,lsl#24
416         eor     r0,r0,r10,ror#19        @ Sigma1(e)
417 #endif
418         ldr     r12,[r14],#4                    @ *K256++
419         add     r5,r5,r2                        @ h+=X[i]
420         str     r2,[sp,#6*4]
421         eor     r2,r11,r4
422         add     r5,r5,r0,ror#6  @ h+=Sigma1(e)
423         and     r2,r2,r10
424         add     r5,r5,r12                       @ h+=K256[i]
425         eor     r2,r2,r4                        @ Ch(e,f,g)
426         eor     r0,r6,r6,ror#11
427         add     r5,r5,r2                        @ h+=Ch(e,f,g)
428 #if 6==31
429         and     r12,r12,#0xff
430         cmp     r12,#0xf2                       @ done?
431 #endif
432 #if 6<15
433 # if __ARM_ARCH__>=7
434         ldr     r2,[r1],#4                      @ prefetch
435 # else
436         ldrb    r2,[r1,#3]
437 # endif
438         eor     r12,r6,r7                       @ a^b, b^c in next round
439 #else
440         ldr     r2,[sp,#8*4]            @ from future BODY_16_xx
441         eor     r12,r6,r7                       @ a^b, b^c in next round
442         ldr     r1,[sp,#5*4]    @ from future BODY_16_xx
443 #endif
444         eor     r0,r0,r6,ror#20 @ Sigma0(a)
445         and     r3,r3,r12                       @ (b^c)&=(a^b)
446         add     r9,r9,r5                        @ d+=h
447         eor     r3,r3,r7                        @ Maj(a,b,c)
448         add     r5,r5,r0,ror#2  @ h+=Sigma0(a)
449         @ add   r5,r5,r3                        @ h+=Maj(a,b,c)
450 #if __ARM_ARCH__>=7
451         @ ldr   r2,[r1],#4                      @ 7
452 # if 7==15
453         str     r1,[sp,#17*4]                   @ make room for r1
454 # endif
455         eor     r0,r9,r9,ror#5
456         add     r5,r5,r3                        @ h+=Maj(a,b,c) from the past
457         eor     r0,r0,r9,ror#19 @ Sigma1(e)
458         rev     r2,r2
459 #else
460         @ ldrb  r2,[r1,#3]                      @ 7
461         add     r5,r5,r3                        @ h+=Maj(a,b,c) from the past
462         ldrb    r3,[r1,#2]
463         ldrb    r0,[r1,#1]
464         orr     r2,r2,r3,lsl#8
465         ldrb    r3,[r1],#4
466         orr     r2,r2,r0,lsl#16
467 # if 7==15
468         str     r1,[sp,#17*4]                   @ make room for r1
469 # endif
470         eor     r0,r9,r9,ror#5
471         orr     r2,r2,r3,lsl#24
472         eor     r0,r0,r9,ror#19 @ Sigma1(e)
473 #endif
474         ldr     r3,[r14],#4                     @ *K256++
475         add     r4,r4,r2                        @ h+=X[i]
476         str     r2,[sp,#7*4]
477         eor     r2,r10,r11
478         add     r4,r4,r0,ror#6  @ h+=Sigma1(e)
479         and     r2,r2,r9
480         add     r4,r4,r3                        @ h+=K256[i]
481         eor     r2,r2,r11                       @ Ch(e,f,g)
482         eor     r0,r5,r5,ror#11
483         add     r4,r4,r2                        @ h+=Ch(e,f,g)
484 #if 7==31
485         and     r3,r3,#0xff
486         cmp     r3,#0xf2                        @ done?
487 #endif
488 #if 7<15
489 # if __ARM_ARCH__>=7
490         ldr     r2,[r1],#4                      @ prefetch
491 # else
492         ldrb    r2,[r1,#3]
493 # endif
494         eor     r3,r5,r6                        @ a^b, b^c in next round
495 #else
496         ldr     r2,[sp,#9*4]            @ from future BODY_16_xx
497         eor     r3,r5,r6                        @ a^b, b^c in next round
498         ldr     r1,[sp,#6*4]    @ from future BODY_16_xx
499 #endif
500         eor     r0,r0,r5,ror#20 @ Sigma0(a)
501         and     r12,r12,r3                      @ (b^c)&=(a^b)
502         add     r8,r8,r4                        @ d+=h
503         eor     r12,r12,r6                      @ Maj(a,b,c)
504         add     r4,r4,r0,ror#2  @ h+=Sigma0(a)
505         @ add   r4,r4,r12                       @ h+=Maj(a,b,c)
506 #if __ARM_ARCH__>=7
507         @ ldr   r2,[r1],#4                      @ 8
508 # if 8==15
509         str     r1,[sp,#17*4]                   @ make room for r1
510 # endif
511         eor     r0,r8,r8,ror#5
512         add     r4,r4,r12                       @ h+=Maj(a,b,c) from the past
513         eor     r0,r0,r8,ror#19 @ Sigma1(e)
514         rev     r2,r2
515 #else
516         @ ldrb  r2,[r1,#3]                      @ 8
517         add     r4,r4,r12                       @ h+=Maj(a,b,c) from the past
518         ldrb    r12,[r1,#2]
519         ldrb    r0,[r1,#1]
520         orr     r2,r2,r12,lsl#8
521         ldrb    r12,[r1],#4
522         orr     r2,r2,r0,lsl#16
523 # if 8==15
524         str     r1,[sp,#17*4]                   @ make room for r1
525 # endif
526         eor     r0,r8,r8,ror#5
527         orr     r2,r2,r12,lsl#24
528         eor     r0,r0,r8,ror#19 @ Sigma1(e)
529 #endif
530         ldr     r12,[r14],#4                    @ *K256++
531         add     r11,r11,r2                      @ h+=X[i]
532         str     r2,[sp,#8*4]
533         eor     r2,r9,r10
534         add     r11,r11,r0,ror#6        @ h+=Sigma1(e)
535         and     r2,r2,r8
536         add     r11,r11,r12                     @ h+=K256[i]
537         eor     r2,r2,r10                       @ Ch(e,f,g)
538         eor     r0,r4,r4,ror#11
539         add     r11,r11,r2                      @ h+=Ch(e,f,g)
540 #if 8==31
541         and     r12,r12,#0xff
542         cmp     r12,#0xf2                       @ done?
543 #endif
544 #if 8<15
545 # if __ARM_ARCH__>=7
546         ldr     r2,[r1],#4                      @ prefetch
547 # else
548         ldrb    r2,[r1,#3]
549 # endif
550         eor     r12,r4,r5                       @ a^b, b^c in next round
551 #else
552         ldr     r2,[sp,#10*4]           @ from future BODY_16_xx
553         eor     r12,r4,r5                       @ a^b, b^c in next round
554         ldr     r1,[sp,#7*4]    @ from future BODY_16_xx
555 #endif
556         eor     r0,r0,r4,ror#20 @ Sigma0(a)
557         and     r3,r3,r12                       @ (b^c)&=(a^b)
558         add     r7,r7,r11                       @ d+=h
559         eor     r3,r3,r5                        @ Maj(a,b,c)
560         add     r11,r11,r0,ror#2        @ h+=Sigma0(a)
561         @ add   r11,r11,r3                      @ h+=Maj(a,b,c)
562 #if __ARM_ARCH__>=7
563         @ ldr   r2,[r1],#4                      @ 9
564 # if 9==15
565         str     r1,[sp,#17*4]                   @ make room for r1
566 # endif
567         eor     r0,r7,r7,ror#5
568         add     r11,r11,r3                      @ h+=Maj(a,b,c) from the past
569         eor     r0,r0,r7,ror#19 @ Sigma1(e)
570         rev     r2,r2
571 #else
572         @ ldrb  r2,[r1,#3]                      @ 9
573         add     r11,r11,r3                      @ h+=Maj(a,b,c) from the past
574         ldrb    r3,[r1,#2]
575         ldrb    r0,[r1,#1]
576         orr     r2,r2,r3,lsl#8
577         ldrb    r3,[r1],#4
578         orr     r2,r2,r0,lsl#16
579 # if 9==15
580         str     r1,[sp,#17*4]                   @ make room for r1
581 # endif
582         eor     r0,r7,r7,ror#5
583         orr     r2,r2,r3,lsl#24
584         eor     r0,r0,r7,ror#19 @ Sigma1(e)
585 #endif
586         ldr     r3,[r14],#4                     @ *K256++
587         add     r10,r10,r2                      @ h+=X[i]
588         str     r2,[sp,#9*4]
589         eor     r2,r8,r9
590         add     r10,r10,r0,ror#6        @ h+=Sigma1(e)
591         and     r2,r2,r7
592         add     r10,r10,r3                      @ h+=K256[i]
593         eor     r2,r2,r9                        @ Ch(e,f,g)
594         eor     r0,r11,r11,ror#11
595         add     r10,r10,r2                      @ h+=Ch(e,f,g)
596 #if 9==31
597         and     r3,r3,#0xff
598         cmp     r3,#0xf2                        @ done?
599 #endif
600 #if 9<15
601 # if __ARM_ARCH__>=7
602         ldr     r2,[r1],#4                      @ prefetch
603 # else
604         ldrb    r2,[r1,#3]
605 # endif
606         eor     r3,r11,r4                       @ a^b, b^c in next round
607 #else
608         ldr     r2,[sp,#11*4]           @ from future BODY_16_xx
609         eor     r3,r11,r4                       @ a^b, b^c in next round
610         ldr     r1,[sp,#8*4]    @ from future BODY_16_xx
611 #endif
612         eor     r0,r0,r11,ror#20        @ Sigma0(a)
613         and     r12,r12,r3                      @ (b^c)&=(a^b)
614         add     r6,r6,r10                       @ d+=h
615         eor     r12,r12,r4                      @ Maj(a,b,c)
616         add     r10,r10,r0,ror#2        @ h+=Sigma0(a)
617         @ add   r10,r10,r12                     @ h+=Maj(a,b,c)
618 #if __ARM_ARCH__>=7
619         @ ldr   r2,[r1],#4                      @ 10
620 # if 10==15
621         str     r1,[sp,#17*4]                   @ make room for r1
622 # endif
623         eor     r0,r6,r6,ror#5
624         add     r10,r10,r12                     @ h+=Maj(a,b,c) from the past
625         eor     r0,r0,r6,ror#19 @ Sigma1(e)
626         rev     r2,r2
627 #else
628         @ ldrb  r2,[r1,#3]                      @ 10
629         add     r10,r10,r12                     @ h+=Maj(a,b,c) from the past
630         ldrb    r12,[r1,#2]
631         ldrb    r0,[r1,#1]
632         orr     r2,r2,r12,lsl#8
633         ldrb    r12,[r1],#4
634         orr     r2,r2,r0,lsl#16
635 # if 10==15
636         str     r1,[sp,#17*4]                   @ make room for r1
637 # endif
638         eor     r0,r6,r6,ror#5
639         orr     r2,r2,r12,lsl#24
640         eor     r0,r0,r6,ror#19 @ Sigma1(e)
641 #endif
642         ldr     r12,[r14],#4                    @ *K256++
643         add     r9,r9,r2                        @ h+=X[i]
644         str     r2,[sp,#10*4]
645         eor     r2,r7,r8
646         add     r9,r9,r0,ror#6  @ h+=Sigma1(e)
647         and     r2,r2,r6
648         add     r9,r9,r12                       @ h+=K256[i]
649         eor     r2,r2,r8                        @ Ch(e,f,g)
650         eor     r0,r10,r10,ror#11
651         add     r9,r9,r2                        @ h+=Ch(e,f,g)
652 #if 10==31
653         and     r12,r12,#0xff
654         cmp     r12,#0xf2                       @ done?
655 #endif
656 #if 10<15
657 # if __ARM_ARCH__>=7
658         ldr     r2,[r1],#4                      @ prefetch
659 # else
660         ldrb    r2,[r1,#3]
661 # endif
662         eor     r12,r10,r11                     @ a^b, b^c in next round
663 #else
664         ldr     r2,[sp,#12*4]           @ from future BODY_16_xx
665         eor     r12,r10,r11                     @ a^b, b^c in next round
666         ldr     r1,[sp,#9*4]    @ from future BODY_16_xx
667 #endif
668         eor     r0,r0,r10,ror#20        @ Sigma0(a)
669         and     r3,r3,r12                       @ (b^c)&=(a^b)
670         add     r5,r5,r9                        @ d+=h
671         eor     r3,r3,r11                       @ Maj(a,b,c)
672         add     r9,r9,r0,ror#2  @ h+=Sigma0(a)
673         @ add   r9,r9,r3                        @ h+=Maj(a,b,c)
674 #if __ARM_ARCH__>=7
675         @ ldr   r2,[r1],#4                      @ 11
676 # if 11==15
677         str     r1,[sp,#17*4]                   @ make room for r1
678 # endif
679         eor     r0,r5,r5,ror#5
680         add     r9,r9,r3                        @ h+=Maj(a,b,c) from the past
681         eor     r0,r0,r5,ror#19 @ Sigma1(e)
682         rev     r2,r2
683 #else
684         @ ldrb  r2,[r1,#3]                      @ 11
685         add     r9,r9,r3                        @ h+=Maj(a,b,c) from the past
686         ldrb    r3,[r1,#2]
687         ldrb    r0,[r1,#1]
688         orr     r2,r2,r3,lsl#8
689         ldrb    r3,[r1],#4
690         orr     r2,r2,r0,lsl#16
691 # if 11==15
692         str     r1,[sp,#17*4]                   @ make room for r1
693 # endif
694         eor     r0,r5,r5,ror#5
695         orr     r2,r2,r3,lsl#24
696         eor     r0,r0,r5,ror#19 @ Sigma1(e)
697 #endif
698         ldr     r3,[r14],#4                     @ *K256++
699         add     r8,r8,r2                        @ h+=X[i]
700         str     r2,[sp,#11*4]
701         eor     r2,r6,r7
702         add     r8,r8,r0,ror#6  @ h+=Sigma1(e)
703         and     r2,r2,r5
704         add     r8,r8,r3                        @ h+=K256[i]
705         eor     r2,r2,r7                        @ Ch(e,f,g)
706         eor     r0,r9,r9,ror#11
707         add     r8,r8,r2                        @ h+=Ch(e,f,g)
708 #if 11==31
709         and     r3,r3,#0xff
710         cmp     r3,#0xf2                        @ done?
711 #endif
712 #if 11<15
713 # if __ARM_ARCH__>=7
714         ldr     r2,[r1],#4                      @ prefetch
715 # else
716         ldrb    r2,[r1,#3]
717 # endif
718         eor     r3,r9,r10                       @ a^b, b^c in next round
719 #else
720         ldr     r2,[sp,#13*4]           @ from future BODY_16_xx
721         eor     r3,r9,r10                       @ a^b, b^c in next round
722         ldr     r1,[sp,#10*4]   @ from future BODY_16_xx
723 #endif
724         eor     r0,r0,r9,ror#20 @ Sigma0(a)
725         and     r12,r12,r3                      @ (b^c)&=(a^b)
726         add     r4,r4,r8                        @ d+=h
727         eor     r12,r12,r10                     @ Maj(a,b,c)
728         add     r8,r8,r0,ror#2  @ h+=Sigma0(a)
729         @ add   r8,r8,r12                       @ h+=Maj(a,b,c)
730 #if __ARM_ARCH__>=7
731         @ ldr   r2,[r1],#4                      @ 12
732 # if 12==15
733         str     r1,[sp,#17*4]                   @ make room for r1
734 # endif
735         eor     r0,r4,r4,ror#5
736         add     r8,r8,r12                       @ h+=Maj(a,b,c) from the past
737         eor     r0,r0,r4,ror#19 @ Sigma1(e)
738         rev     r2,r2
739 #else
740         @ ldrb  r2,[r1,#3]                      @ 12
741         add     r8,r8,r12                       @ h+=Maj(a,b,c) from the past
742         ldrb    r12,[r1,#2]
743         ldrb    r0,[r1,#1]
744         orr     r2,r2,r12,lsl#8
745         ldrb    r12,[r1],#4
746         orr     r2,r2,r0,lsl#16
747 # if 12==15
748         str     r1,[sp,#17*4]                   @ make room for r1
749 # endif
750         eor     r0,r4,r4,ror#5
751         orr     r2,r2,r12,lsl#24
752         eor     r0,r0,r4,ror#19 @ Sigma1(e)
753 #endif
754         ldr     r12,[r14],#4                    @ *K256++
755         add     r7,r7,r2                        @ h+=X[i]
756         str     r2,[sp,#12*4]
757         eor     r2,r5,r6
758         add     r7,r7,r0,ror#6  @ h+=Sigma1(e)
759         and     r2,r2,r4
760         add     r7,r7,r12                       @ h+=K256[i]
761         eor     r2,r2,r6                        @ Ch(e,f,g)
762         eor     r0,r8,r8,ror#11
763         add     r7,r7,r2                        @ h+=Ch(e,f,g)
764 #if 12==31
765         and     r12,r12,#0xff
766         cmp     r12,#0xf2                       @ done?
767 #endif
768 #if 12<15
769 # if __ARM_ARCH__>=7
770         ldr     r2,[r1],#4                      @ prefetch
771 # else
772         ldrb    r2,[r1,#3]
773 # endif
774         eor     r12,r8,r9                       @ a^b, b^c in next round
775 #else
776         ldr     r2,[sp,#14*4]           @ from future BODY_16_xx
777         eor     r12,r8,r9                       @ a^b, b^c in next round
778         ldr     r1,[sp,#11*4]   @ from future BODY_16_xx
779 #endif
780         eor     r0,r0,r8,ror#20 @ Sigma0(a)
781         and     r3,r3,r12                       @ (b^c)&=(a^b)
782         add     r11,r11,r7                      @ d+=h
783         eor     r3,r3,r9                        @ Maj(a,b,c)
784         add     r7,r7,r0,ror#2  @ h+=Sigma0(a)
785         @ add   r7,r7,r3                        @ h+=Maj(a,b,c)
786 #if __ARM_ARCH__>=7
787         @ ldr   r2,[r1],#4                      @ 13
788 # if 13==15
789         str     r1,[sp,#17*4]                   @ make room for r1
790 # endif
791         eor     r0,r11,r11,ror#5
792         add     r7,r7,r3                        @ h+=Maj(a,b,c) from the past
793         eor     r0,r0,r11,ror#19        @ Sigma1(e)
794         rev     r2,r2
795 #else
796         @ ldrb  r2,[r1,#3]                      @ 13
797         add     r7,r7,r3                        @ h+=Maj(a,b,c) from the past
798         ldrb    r3,[r1,#2]
799         ldrb    r0,[r1,#1]
800         orr     r2,r2,r3,lsl#8
801         ldrb    r3,[r1],#4
802         orr     r2,r2,r0,lsl#16
803 # if 13==15
804         str     r1,[sp,#17*4]                   @ make room for r1
805 # endif
806         eor     r0,r11,r11,ror#5
807         orr     r2,r2,r3,lsl#24
808         eor     r0,r0,r11,ror#19        @ Sigma1(e)
809 #endif
810         ldr     r3,[r14],#4                     @ *K256++
811         add     r6,r6,r2                        @ h+=X[i]
812         str     r2,[sp,#13*4]
813         eor     r2,r4,r5
814         add     r6,r6,r0,ror#6  @ h+=Sigma1(e)
815         and     r2,r2,r11
816         add     r6,r6,r3                        @ h+=K256[i]
817         eor     r2,r2,r5                        @ Ch(e,f,g)
818         eor     r0,r7,r7,ror#11
819         add     r6,r6,r2                        @ h+=Ch(e,f,g)
820 #if 13==31
821         and     r3,r3,#0xff
822         cmp     r3,#0xf2                        @ done?
823 #endif
824 #if 13<15
825 # if __ARM_ARCH__>=7
826         ldr     r2,[r1],#4                      @ prefetch
827 # else
828         ldrb    r2,[r1,#3]
829 # endif
830         eor     r3,r7,r8                        @ a^b, b^c in next round
831 #else
832         ldr     r2,[sp,#15*4]           @ from future BODY_16_xx
833         eor     r3,r7,r8                        @ a^b, b^c in next round
834         ldr     r1,[sp,#12*4]   @ from future BODY_16_xx
835 #endif
836         eor     r0,r0,r7,ror#20 @ Sigma0(a)
837         and     r12,r12,r3                      @ (b^c)&=(a^b)
838         add     r10,r10,r6                      @ d+=h
839         eor     r12,r12,r8                      @ Maj(a,b,c)
840         add     r6,r6,r0,ror#2  @ h+=Sigma0(a)
841         @ add   r6,r6,r12                       @ h+=Maj(a,b,c)
842 #if __ARM_ARCH__>=7
843         @ ldr   r2,[r1],#4                      @ 14
844 # if 14==15
845         str     r1,[sp,#17*4]                   @ make room for r1
846 # endif
847         eor     r0,r10,r10,ror#5
848         add     r6,r6,r12                       @ h+=Maj(a,b,c) from the past
849         eor     r0,r0,r10,ror#19        @ Sigma1(e)
850         rev     r2,r2
851 #else
852         @ ldrb  r2,[r1,#3]                      @ 14
853         add     r6,r6,r12                       @ h+=Maj(a,b,c) from the past
854         ldrb    r12,[r1,#2]
855         ldrb    r0,[r1,#1]
856         orr     r2,r2,r12,lsl#8
857         ldrb    r12,[r1],#4
858         orr     r2,r2,r0,lsl#16
859 # if 14==15
860         str     r1,[sp,#17*4]                   @ make room for r1
861 # endif
862         eor     r0,r10,r10,ror#5
863         orr     r2,r2,r12,lsl#24
864         eor     r0,r0,r10,ror#19        @ Sigma1(e)
865 #endif
866         ldr     r12,[r14],#4                    @ *K256++
867         add     r5,r5,r2                        @ h+=X[i]
868         str     r2,[sp,#14*4]
869         eor     r2,r11,r4
870         add     r5,r5,r0,ror#6  @ h+=Sigma1(e)
871         and     r2,r2,r10
872         add     r5,r5,r12                       @ h+=K256[i]
873         eor     r2,r2,r4                        @ Ch(e,f,g)
874         eor     r0,r6,r6,ror#11
875         add     r5,r5,r2                        @ h+=Ch(e,f,g)
876 #if 14==31
877         and     r12,r12,#0xff
878         cmp     r12,#0xf2                       @ done?
879 #endif
880 #if 14<15
881 # if __ARM_ARCH__>=7
882         ldr     r2,[r1],#4                      @ prefetch
883 # else
884         ldrb    r2,[r1,#3]
885 # endif
886         eor     r12,r6,r7                       @ a^b, b^c in next round
887 #else
888         ldr     r2,[sp,#0*4]            @ from future BODY_16_xx
889         eor     r12,r6,r7                       @ a^b, b^c in next round
890         ldr     r1,[sp,#13*4]   @ from future BODY_16_xx
891 #endif
892         eor     r0,r0,r6,ror#20 @ Sigma0(a)
893         and     r3,r3,r12                       @ (b^c)&=(a^b)
894         add     r9,r9,r5                        @ d+=h
895         eor     r3,r3,r7                        @ Maj(a,b,c)
896         add     r5,r5,r0,ror#2  @ h+=Sigma0(a)
897         @ add   r5,r5,r3                        @ h+=Maj(a,b,c)
898 #if __ARM_ARCH__>=7
899         @ ldr   r2,[r1],#4                      @ 15
900 # if 15==15
901         str     r1,[sp,#17*4]                   @ make room for r1
902 # endif
903         eor     r0,r9,r9,ror#5
904         add     r5,r5,r3                        @ h+=Maj(a,b,c) from the past
905         eor     r0,r0,r9,ror#19 @ Sigma1(e)
906         rev     r2,r2
907 #else
908         @ ldrb  r2,[r1,#3]                      @ 15
909         add     r5,r5,r3                        @ h+=Maj(a,b,c) from the past
910         ldrb    r3,[r1,#2]
911         ldrb    r0,[r1,#1]
912         orr     r2,r2,r3,lsl#8
913         ldrb    r3,[r1],#4
914         orr     r2,r2,r0,lsl#16
915 # if 15==15
916         str     r1,[sp,#17*4]                   @ make room for r1
917 # endif
918         eor     r0,r9,r9,ror#5
919         orr     r2,r2,r3,lsl#24
920         eor     r0,r0,r9,ror#19 @ Sigma1(e)
921 #endif
922         ldr     r3,[r14],#4                     @ *K256++
923         add     r4,r4,r2                        @ h+=X[i]
924         str     r2,[sp,#15*4]
925         eor     r2,r10,r11
926         add     r4,r4,r0,ror#6  @ h+=Sigma1(e)
927         and     r2,r2,r9
928         add     r4,r4,r3                        @ h+=K256[i]
929         eor     r2,r2,r11                       @ Ch(e,f,g)
930         eor     r0,r5,r5,ror#11
931         add     r4,r4,r2                        @ h+=Ch(e,f,g)
932 #if 15==31
933         and     r3,r3,#0xff
934         cmp     r3,#0xf2                        @ done?
935 #endif
936 #if 15<15
937 # if __ARM_ARCH__>=7
938         ldr     r2,[r1],#4                      @ prefetch
939 # else
940         ldrb    r2,[r1,#3]
941 # endif
942         eor     r3,r5,r6                        @ a^b, b^c in next round
943 #else
944         ldr     r2,[sp,#1*4]            @ from future BODY_16_xx
945         eor     r3,r5,r6                        @ a^b, b^c in next round
946         ldr     r1,[sp,#14*4]   @ from future BODY_16_xx
947 #endif
948         eor     r0,r0,r5,ror#20 @ Sigma0(a)
949         and     r12,r12,r3                      @ (b^c)&=(a^b)
950         add     r8,r8,r4                        @ d+=h
951         eor     r12,r12,r6                      @ Maj(a,b,c)
952         add     r4,r4,r0,ror#2  @ h+=Sigma0(a)
953         @ add   r4,r4,r12                       @ h+=Maj(a,b,c)
954 .Lrounds_16_xx:
        @ Rounds 16..63 (this is the macro expansion for round 16).
        @ Each round first extends the message schedule — combining
        @ sigma0(X[i+1]) and sigma1(X[i+14]) with two older words held
        @ in the 16-entry circular buffer at sp (the ldr/str [sp,#n*4]
        @ accesses below) — then performs the standard SHA-256 round.
        @ NOTE: the Maj(a,b,c) addition of the PREVIOUS round is
        @ deferred into this one ("from the past") to hide latency;
        @ likewise the loads tagged "from future BODY_16_xx" prefetch
        @ operands for the NEXT round.  The #if conditions on the
        @ round index (16==31, 16<15) are compile-time constants left
        @ over from unrolling and select dead/live paths statically.
955         @ ldr   r2,[sp,#1*4]            @ 16
956         @ ldr   r1,[sp,#14*4]
957         mov     r0,r2,ror#7
958         add     r4,r4,r12                       @ h+=Maj(a,b,c) from the past
959         mov     r12,r1,ror#17
960         eor     r0,r0,r2,ror#18
961         eor     r12,r12,r1,ror#19
962         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
963         ldr     r2,[sp,#0*4]
964         eor     r12,r12,r1,lsr#10       @ sigma1(X[i+14])
965         ldr     r1,[sp,#9*4]
967         add     r12,r12,r0
968         eor     r0,r8,r8,ror#5  @ from BODY_00_15
969         add     r2,r2,r12
970         eor     r0,r0,r8,ror#19 @ Sigma1(e)
971         add     r2,r2,r1                        @ X[i]
972         ldr     r12,[r14],#4                    @ *K256++
973         add     r11,r11,r2                      @ h+=X[i]
974         str     r2,[sp,#0*4]
975         eor     r2,r9,r10
976         add     r11,r11,r0,ror#6        @ h+=Sigma1(e)
977         and     r2,r2,r8
978         add     r11,r11,r12                     @ h+=K256[i]
979         eor     r2,r2,r10                       @ Ch(e,f,g)
980         eor     r0,r4,r4,ror#11
981         add     r11,r11,r2                      @ h+=Ch(e,f,g)
982 #if 16==31
983         and     r12,r12,#0xff
984         cmp     r12,#0xf2                       @ done?
985 #endif
986 #if 16<15
987 # if __ARM_ARCH__>=7
988         ldr     r2,[r1],#4                      @ prefetch
989 # else
990         ldrb    r2,[r1,#3]
991 # endif
992         eor     r12,r4,r5                       @ a^b, b^c in next round
993 #else
994         ldr     r2,[sp,#2*4]            @ from future BODY_16_xx
995         eor     r12,r4,r5                       @ a^b, b^c in next round
996         ldr     r1,[sp,#15*4]   @ from future BODY_16_xx
997 #endif
998         eor     r0,r0,r4,ror#20 @ Sigma0(a)
999         and     r3,r3,r12                       @ (b^c)&=(a^b)
1000         add     r7,r7,r11                       @ d+=h
1001         eor     r3,r3,r5                        @ Maj(a,b,c)
1002         add     r11,r11,r0,ror#2        @ h+=Sigma0(a)
1003         @ add   r11,r11,r3                      @ h+=Maj(a,b,c)
1004         @ ldr   r2,[sp,#2*4]            @ 17
1005         @ ldr   r1,[sp,#15*4]
1006         mov     r0,r2,ror#7
1007         add     r11,r11,r3                      @ h+=Maj(a,b,c) from the past
1008         mov     r3,r1,ror#17
1009         eor     r0,r0,r2,ror#18
1010         eor     r3,r3,r1,ror#19
1011         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1012         ldr     r2,[sp,#1*4]
1013         eor     r3,r3,r1,lsr#10 @ sigma1(X[i+14])
1014         ldr     r1,[sp,#10*4]
1016         add     r3,r3,r0
1017         eor     r0,r7,r7,ror#5  @ from BODY_00_15
1018         add     r2,r2,r3
1019         eor     r0,r0,r7,ror#19 @ Sigma1(e)
1020         add     r2,r2,r1                        @ X[i]
1021         ldr     r3,[r14],#4                     @ *K256++
1022         add     r10,r10,r2                      @ h+=X[i]
1023         str     r2,[sp,#1*4]
1024         eor     r2,r8,r9
1025         add     r10,r10,r0,ror#6        @ h+=Sigma1(e)
1026         and     r2,r2,r7
1027         add     r10,r10,r3                      @ h+=K256[i]
1028         eor     r2,r2,r9                        @ Ch(e,f,g)
1029         eor     r0,r11,r11,ror#11
1030         add     r10,r10,r2                      @ h+=Ch(e,f,g)
1031 #if 17==31
1032         and     r3,r3,#0xff
1033         cmp     r3,#0xf2                        @ done?
1034 #endif
1035 #if 17<15
1036 # if __ARM_ARCH__>=7
1037         ldr     r2,[r1],#4                      @ prefetch
1038 # else
1039         ldrb    r2,[r1,#3]
1040 # endif
1041         eor     r3,r11,r4                       @ a^b, b^c in next round
1042 #else
1043         ldr     r2,[sp,#3*4]            @ from future BODY_16_xx
1044         eor     r3,r11,r4                       @ a^b, b^c in next round
1045         ldr     r1,[sp,#0*4]    @ from future BODY_16_xx
1046 #endif
1047         eor     r0,r0,r11,ror#20        @ Sigma0(a)
1048         and     r12,r12,r3                      @ (b^c)&=(a^b)
1049         add     r6,r6,r10                       @ d+=h
1050         eor     r12,r12,r4                      @ Maj(a,b,c)
1051         add     r10,r10,r0,ror#2        @ h+=Sigma0(a)
1052         @ add   r10,r10,r12                     @ h+=Maj(a,b,c)
1053         @ ldr   r2,[sp,#3*4]            @ 18
1054         @ ldr   r1,[sp,#0*4]
1055         mov     r0,r2,ror#7
1056         add     r10,r10,r12                     @ h+=Maj(a,b,c) from the past
1057         mov     r12,r1,ror#17
1058         eor     r0,r0,r2,ror#18
1059         eor     r12,r12,r1,ror#19
1060         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1061         ldr     r2,[sp,#2*4]
1062         eor     r12,r12,r1,lsr#10       @ sigma1(X[i+14])
1063         ldr     r1,[sp,#11*4]
1065         add     r12,r12,r0
1066         eor     r0,r6,r6,ror#5  @ from BODY_00_15
1067         add     r2,r2,r12
1068         eor     r0,r0,r6,ror#19 @ Sigma1(e)
1069         add     r2,r2,r1                        @ X[i]
1070         ldr     r12,[r14],#4                    @ *K256++
1071         add     r9,r9,r2                        @ h+=X[i]
1072         str     r2,[sp,#2*4]
1073         eor     r2,r7,r8
1074         add     r9,r9,r0,ror#6  @ h+=Sigma1(e)
1075         and     r2,r2,r6
1076         add     r9,r9,r12                       @ h+=K256[i]
1077         eor     r2,r2,r8                        @ Ch(e,f,g)
1078         eor     r0,r10,r10,ror#11
1079         add     r9,r9,r2                        @ h+=Ch(e,f,g)
1080 #if 18==31
1081         and     r12,r12,#0xff
1082         cmp     r12,#0xf2                       @ done?
1083 #endif
1084 #if 18<15
1085 # if __ARM_ARCH__>=7
1086         ldr     r2,[r1],#4                      @ prefetch
1087 # else
1088         ldrb    r2,[r1,#3]
1089 # endif
1090         eor     r12,r10,r11                     @ a^b, b^c in next round
1091 #else
1092         ldr     r2,[sp,#4*4]            @ from future BODY_16_xx
1093         eor     r12,r10,r11                     @ a^b, b^c in next round
1094         ldr     r1,[sp,#1*4]    @ from future BODY_16_xx
1095 #endif
1096         eor     r0,r0,r10,ror#20        @ Sigma0(a)
1097         and     r3,r3,r12                       @ (b^c)&=(a^b)
1098         add     r5,r5,r9                        @ d+=h
1099         eor     r3,r3,r11                       @ Maj(a,b,c)
1100         add     r9,r9,r0,ror#2  @ h+=Sigma0(a)
1101         @ add   r9,r9,r3                        @ h+=Maj(a,b,c)
1102         @ ldr   r2,[sp,#4*4]            @ 19
1103         @ ldr   r1,[sp,#1*4]
1104         mov     r0,r2,ror#7
1105         add     r9,r9,r3                        @ h+=Maj(a,b,c) from the past
1106         mov     r3,r1,ror#17
1107         eor     r0,r0,r2,ror#18
1108         eor     r3,r3,r1,ror#19
1109         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1110         ldr     r2,[sp,#3*4]
1111         eor     r3,r3,r1,lsr#10 @ sigma1(X[i+14])
1112         ldr     r1,[sp,#12*4]
1114         add     r3,r3,r0
1115         eor     r0,r5,r5,ror#5  @ from BODY_00_15
1116         add     r2,r2,r3
1117         eor     r0,r0,r5,ror#19 @ Sigma1(e)
1118         add     r2,r2,r1                        @ X[i]
1119         ldr     r3,[r14],#4                     @ *K256++
1120         add     r8,r8,r2                        @ h+=X[i]
1121         str     r2,[sp,#3*4]
1122         eor     r2,r6,r7
1123         add     r8,r8,r0,ror#6  @ h+=Sigma1(e)
1124         and     r2,r2,r5
1125         add     r8,r8,r3                        @ h+=K256[i]
1126         eor     r2,r2,r7                        @ Ch(e,f,g)
1127         eor     r0,r9,r9,ror#11
1128         add     r8,r8,r2                        @ h+=Ch(e,f,g)
1129 #if 19==31
1130         and     r3,r3,#0xff
1131         cmp     r3,#0xf2                        @ done?
1132 #endif
1133 #if 19<15
1134 # if __ARM_ARCH__>=7
1135         ldr     r2,[r1],#4                      @ prefetch
1136 # else
1137         ldrb    r2,[r1,#3]
1138 # endif
1139         eor     r3,r9,r10                       @ a^b, b^c in next round
1140 #else
1141         ldr     r2,[sp,#5*4]            @ from future BODY_16_xx
1142         eor     r3,r9,r10                       @ a^b, b^c in next round
1143         ldr     r1,[sp,#2*4]    @ from future BODY_16_xx
1144 #endif
1145         eor     r0,r0,r9,ror#20 @ Sigma0(a)
1146         and     r12,r12,r3                      @ (b^c)&=(a^b)
1147         add     r4,r4,r8                        @ d+=h
1148         eor     r12,r12,r10                     @ Maj(a,b,c)
1149         add     r8,r8,r0,ror#2  @ h+=Sigma0(a)
1150         @ add   r8,r8,r12                       @ h+=Maj(a,b,c)
1151         @ ldr   r2,[sp,#5*4]            @ 20
1152         @ ldr   r1,[sp,#2*4]
1153         mov     r0,r2,ror#7
1154         add     r8,r8,r12                       @ h+=Maj(a,b,c) from the past
1155         mov     r12,r1,ror#17
1156         eor     r0,r0,r2,ror#18
1157         eor     r12,r12,r1,ror#19
1158         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1159         ldr     r2,[sp,#4*4]
1160         eor     r12,r12,r1,lsr#10       @ sigma1(X[i+14])
1161         ldr     r1,[sp,#13*4]
1163         add     r12,r12,r0
1164         eor     r0,r4,r4,ror#5  @ from BODY_00_15
1165         add     r2,r2,r12
1166         eor     r0,r0,r4,ror#19 @ Sigma1(e)
1167         add     r2,r2,r1                        @ X[i]
1168         ldr     r12,[r14],#4                    @ *K256++
1169         add     r7,r7,r2                        @ h+=X[i]
1170         str     r2,[sp,#4*4]
1171         eor     r2,r5,r6
1172         add     r7,r7,r0,ror#6  @ h+=Sigma1(e)
1173         and     r2,r2,r4
1174         add     r7,r7,r12                       @ h+=K256[i]
1175         eor     r2,r2,r6                        @ Ch(e,f,g)
1176         eor     r0,r8,r8,ror#11
1177         add     r7,r7,r2                        @ h+=Ch(e,f,g)
1178 #if 20==31
1179         and     r12,r12,#0xff
1180         cmp     r12,#0xf2                       @ done?
1181 #endif
1182 #if 20<15
1183 # if __ARM_ARCH__>=7
1184         ldr     r2,[r1],#4                      @ prefetch
1185 # else
1186         ldrb    r2,[r1,#3]
1187 # endif
1188         eor     r12,r8,r9                       @ a^b, b^c in next round
1189 #else
1190         ldr     r2,[sp,#6*4]            @ from future BODY_16_xx
1191         eor     r12,r8,r9                       @ a^b, b^c in next round
1192         ldr     r1,[sp,#3*4]    @ from future BODY_16_xx
1193 #endif
1194         eor     r0,r0,r8,ror#20 @ Sigma0(a)
1195         and     r3,r3,r12                       @ (b^c)&=(a^b)
1196         add     r11,r11,r7                      @ d+=h
1197         eor     r3,r3,r9                        @ Maj(a,b,c)
1198         add     r7,r7,r0,ror#2  @ h+=Sigma0(a)
1199         @ add   r7,r7,r3                        @ h+=Maj(a,b,c)
1200         @ ldr   r2,[sp,#6*4]            @ 21
1201         @ ldr   r1,[sp,#3*4]
1202         mov     r0,r2,ror#7
1203         add     r7,r7,r3                        @ h+=Maj(a,b,c) from the past
1204         mov     r3,r1,ror#17
1205         eor     r0,r0,r2,ror#18
1206         eor     r3,r3,r1,ror#19
1207         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1208         ldr     r2,[sp,#5*4]
1209         eor     r3,r3,r1,lsr#10 @ sigma1(X[i+14])
1210         ldr     r1,[sp,#14*4]
1212         add     r3,r3,r0
1213         eor     r0,r11,r11,ror#5        @ from BODY_00_15
1214         add     r2,r2,r3
1215         eor     r0,r0,r11,ror#19        @ Sigma1(e)
1216         add     r2,r2,r1                        @ X[i]
1217         ldr     r3,[r14],#4                     @ *K256++
1218         add     r6,r6,r2                        @ h+=X[i]
1219         str     r2,[sp,#5*4]
1220         eor     r2,r4,r5
1221         add     r6,r6,r0,ror#6  @ h+=Sigma1(e)
1222         and     r2,r2,r11
1223         add     r6,r6,r3                        @ h+=K256[i]
1224         eor     r2,r2,r5                        @ Ch(e,f,g)
1225         eor     r0,r7,r7,ror#11
1226         add     r6,r6,r2                        @ h+=Ch(e,f,g)
1227 #if 21==31
1228         and     r3,r3,#0xff
1229         cmp     r3,#0xf2                        @ done?
1230 #endif
1231 #if 21<15
1232 # if __ARM_ARCH__>=7
1233         ldr     r2,[r1],#4                      @ prefetch
1234 # else
1235         ldrb    r2,[r1,#3]
1236 # endif
1237         eor     r3,r7,r8                        @ a^b, b^c in next round
1238 #else
1239         ldr     r2,[sp,#7*4]            @ from future BODY_16_xx
1240         eor     r3,r7,r8                        @ a^b, b^c in next round
1241         ldr     r1,[sp,#4*4]    @ from future BODY_16_xx
1242 #endif
1243         eor     r0,r0,r7,ror#20 @ Sigma0(a)
1244         and     r12,r12,r3                      @ (b^c)&=(a^b)
1245         add     r10,r10,r6                      @ d+=h
1246         eor     r12,r12,r8                      @ Maj(a,b,c)
1247         add     r6,r6,r0,ror#2  @ h+=Sigma0(a)
1248         @ add   r6,r6,r12                       @ h+=Maj(a,b,c)
1249         @ ldr   r2,[sp,#7*4]            @ 22
1250         @ ldr   r1,[sp,#4*4]
1251         mov     r0,r2,ror#7
1252         add     r6,r6,r12                       @ h+=Maj(a,b,c) from the past
1253         mov     r12,r1,ror#17
1254         eor     r0,r0,r2,ror#18
1255         eor     r12,r12,r1,ror#19
1256         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1257         ldr     r2,[sp,#6*4]
1258         eor     r12,r12,r1,lsr#10       @ sigma1(X[i+14])
1259         ldr     r1,[sp,#15*4]
1261         add     r12,r12,r0
1262         eor     r0,r10,r10,ror#5        @ from BODY_00_15
1263         add     r2,r2,r12
1264         eor     r0,r0,r10,ror#19        @ Sigma1(e)
1265         add     r2,r2,r1                        @ X[i]
1266         ldr     r12,[r14],#4                    @ *K256++
1267         add     r5,r5,r2                        @ h+=X[i]
1268         str     r2,[sp,#6*4]
1269         eor     r2,r11,r4
1270         add     r5,r5,r0,ror#6  @ h+=Sigma1(e)
1271         and     r2,r2,r10
1272         add     r5,r5,r12                       @ h+=K256[i]
1273         eor     r2,r2,r4                        @ Ch(e,f,g)
1274         eor     r0,r6,r6,ror#11
1275         add     r5,r5,r2                        @ h+=Ch(e,f,g)
1276 #if 22==31
1277         and     r12,r12,#0xff
1278         cmp     r12,#0xf2                       @ done?
1279 #endif
1280 #if 22<15
1281 # if __ARM_ARCH__>=7
1282         ldr     r2,[r1],#4                      @ prefetch
1283 # else
1284         ldrb    r2,[r1,#3]
1285 # endif
1286         eor     r12,r6,r7                       @ a^b, b^c in next round
1287 #else
1288         ldr     r2,[sp,#8*4]            @ from future BODY_16_xx
1289         eor     r12,r6,r7                       @ a^b, b^c in next round
1290         ldr     r1,[sp,#5*4]    @ from future BODY_16_xx
1291 #endif
1292         eor     r0,r0,r6,ror#20 @ Sigma0(a)
1293         and     r3,r3,r12                       @ (b^c)&=(a^b)
1294         add     r9,r9,r5                        @ d+=h
1295         eor     r3,r3,r7                        @ Maj(a,b,c)
1296         add     r5,r5,r0,ror#2  @ h+=Sigma0(a)
1297         @ add   r5,r5,r3                        @ h+=Maj(a,b,c)
1298         @ ldr   r2,[sp,#8*4]            @ 23
1299         @ ldr   r1,[sp,#5*4]
1300         mov     r0,r2,ror#7
1301         add     r5,r5,r3                        @ h+=Maj(a,b,c) from the past
1302         mov     r3,r1,ror#17
1303         eor     r0,r0,r2,ror#18
1304         eor     r3,r3,r1,ror#19
1305         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1306         ldr     r2,[sp,#7*4]
1307         eor     r3,r3,r1,lsr#10 @ sigma1(X[i+14])
1308         ldr     r1,[sp,#0*4]
1310         add     r3,r3,r0
1311         eor     r0,r9,r9,ror#5  @ from BODY_00_15
1312         add     r2,r2,r3
1313         eor     r0,r0,r9,ror#19 @ Sigma1(e)
1314         add     r2,r2,r1                        @ X[i]
1315         ldr     r3,[r14],#4                     @ *K256++
1316         add     r4,r4,r2                        @ h+=X[i]
1317         str     r2,[sp,#7*4]
1318         eor     r2,r10,r11
1319         add     r4,r4,r0,ror#6  @ h+=Sigma1(e)
1320         and     r2,r2,r9
1321         add     r4,r4,r3                        @ h+=K256[i]
1322         eor     r2,r2,r11                       @ Ch(e,f,g)
1323         eor     r0,r5,r5,ror#11
1324         add     r4,r4,r2                        @ h+=Ch(e,f,g)
1325 #if 23==31
1326         and     r3,r3,#0xff
1327         cmp     r3,#0xf2                        @ done?
1328 #endif
1329 #if 23<15
1330 # if __ARM_ARCH__>=7
1331         ldr     r2,[r1],#4                      @ prefetch
1332 # else
1333         ldrb    r2,[r1,#3]
1334 # endif
1335         eor     r3,r5,r6                        @ a^b, b^c in next round
1336 #else
1337         ldr     r2,[sp,#9*4]            @ from future BODY_16_xx
1338         eor     r3,r5,r6                        @ a^b, b^c in next round
1339         ldr     r1,[sp,#6*4]    @ from future BODY_16_xx
1340 #endif
1341         eor     r0,r0,r5,ror#20 @ Sigma0(a)
1342         and     r12,r12,r3                      @ (b^c)&=(a^b)
1343         add     r8,r8,r4                        @ d+=h
1344         eor     r12,r12,r6                      @ Maj(a,b,c)
1345         add     r4,r4,r0,ror#2  @ h+=Sigma0(a)
1346         @ add   r4,r4,r12                       @ h+=Maj(a,b,c)
1347         @ ldr   r2,[sp,#9*4]            @ 24
1348         @ ldr   r1,[sp,#6*4]
1349         mov     r0,r2,ror#7
1350         add     r4,r4,r12                       @ h+=Maj(a,b,c) from the past
1351         mov     r12,r1,ror#17
1352         eor     r0,r0,r2,ror#18
1353         eor     r12,r12,r1,ror#19
1354         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1355         ldr     r2,[sp,#8*4]
1356         eor     r12,r12,r1,lsr#10       @ sigma1(X[i+14])
1357         ldr     r1,[sp,#1*4]
1359         add     r12,r12,r0
1360         eor     r0,r8,r8,ror#5  @ from BODY_00_15
1361         add     r2,r2,r12
1362         eor     r0,r0,r8,ror#19 @ Sigma1(e)
1363         add     r2,r2,r1                        @ X[i]
1364         ldr     r12,[r14],#4                    @ *K256++
1365         add     r11,r11,r2                      @ h+=X[i]
1366         str     r2,[sp,#8*4]
1367         eor     r2,r9,r10
1368         add     r11,r11,r0,ror#6        @ h+=Sigma1(e)
1369         and     r2,r2,r8
1370         add     r11,r11,r12                     @ h+=K256[i]
1371         eor     r2,r2,r10                       @ Ch(e,f,g)
1372         eor     r0,r4,r4,ror#11
1373         add     r11,r11,r2                      @ h+=Ch(e,f,g)
1374 #if 24==31
1375         and     r12,r12,#0xff
1376         cmp     r12,#0xf2                       @ done?
1377 #endif
1378 #if 24<15
1379 # if __ARM_ARCH__>=7
1380         ldr     r2,[r1],#4                      @ prefetch
1381 # else
1382         ldrb    r2,[r1,#3]
1383 # endif
1384         eor     r12,r4,r5                       @ a^b, b^c in next round
1385 #else
1386         ldr     r2,[sp,#10*4]           @ from future BODY_16_xx
1387         eor     r12,r4,r5                       @ a^b, b^c in next round
1388         ldr     r1,[sp,#7*4]    @ from future BODY_16_xx
1389 #endif
1390         eor     r0,r0,r4,ror#20 @ Sigma0(a)
1391         and     r3,r3,r12                       @ (b^c)&=(a^b)
1392         add     r7,r7,r11                       @ d+=h
1393         eor     r3,r3,r5                        @ Maj(a,b,c)
1394         add     r11,r11,r0,ror#2        @ h+=Sigma0(a)
1395         @ add   r11,r11,r3                      @ h+=Maj(a,b,c)
1396         @ ldr   r2,[sp,#10*4]           @ 25
1397         @ ldr   r1,[sp,#7*4]
1398         mov     r0,r2,ror#7
1399         add     r11,r11,r3                      @ h+=Maj(a,b,c) from the past
1400         mov     r3,r1,ror#17
1401         eor     r0,r0,r2,ror#18
1402         eor     r3,r3,r1,ror#19
1403         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1404         ldr     r2,[sp,#9*4]
1405         eor     r3,r3,r1,lsr#10 @ sigma1(X[i+14])
1406         ldr     r1,[sp,#2*4]
1408         add     r3,r3,r0
1409         eor     r0,r7,r7,ror#5  @ from BODY_00_15
1410         add     r2,r2,r3
1411         eor     r0,r0,r7,ror#19 @ Sigma1(e)
1412         add     r2,r2,r1                        @ X[i]
1413         ldr     r3,[r14],#4                     @ *K256++
1414         add     r10,r10,r2                      @ h+=X[i]
1415         str     r2,[sp,#9*4]
1416         eor     r2,r8,r9
1417         add     r10,r10,r0,ror#6        @ h+=Sigma1(e)
1418         and     r2,r2,r7
1419         add     r10,r10,r3                      @ h+=K256[i]
1420         eor     r2,r2,r9                        @ Ch(e,f,g)
1421         eor     r0,r11,r11,ror#11
1422         add     r10,r10,r2                      @ h+=Ch(e,f,g)
1423 #if 25==31
1424         and     r3,r3,#0xff
1425         cmp     r3,#0xf2                        @ done?
1426 #endif
1427 #if 25<15
1428 # if __ARM_ARCH__>=7
1429         ldr     r2,[r1],#4                      @ prefetch
1430 # else
1431         ldrb    r2,[r1,#3]
1432 # endif
1433         eor     r3,r11,r4                       @ a^b, b^c in next round
1434 #else
1435         ldr     r2,[sp,#11*4]           @ from future BODY_16_xx
1436         eor     r3,r11,r4                       @ a^b, b^c in next round
1437         ldr     r1,[sp,#8*4]    @ from future BODY_16_xx
1438 #endif
1439         eor     r0,r0,r11,ror#20        @ Sigma0(a)
1440         and     r12,r12,r3                      @ (b^c)&=(a^b)
1441         add     r6,r6,r10                       @ d+=h
1442         eor     r12,r12,r4                      @ Maj(a,b,c)
1443         add     r10,r10,r0,ror#2        @ h+=Sigma0(a)
1444         @ add   r10,r10,r12                     @ h+=Maj(a,b,c)
1445         @ ldr   r2,[sp,#11*4]           @ 26
1446         @ ldr   r1,[sp,#8*4]
1447         mov     r0,r2,ror#7
1448         add     r10,r10,r12                     @ h+=Maj(a,b,c) from the past
1449         mov     r12,r1,ror#17
1450         eor     r0,r0,r2,ror#18
1451         eor     r12,r12,r1,ror#19
1452         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1453         ldr     r2,[sp,#10*4]
1454         eor     r12,r12,r1,lsr#10       @ sigma1(X[i+14])
1455         ldr     r1,[sp,#3*4]
1457         add     r12,r12,r0
1458         eor     r0,r6,r6,ror#5  @ from BODY_00_15
1459         add     r2,r2,r12
1460         eor     r0,r0,r6,ror#19 @ Sigma1(e)
1461         add     r2,r2,r1                        @ X[i]
1462         ldr     r12,[r14],#4                    @ *K256++
1463         add     r9,r9,r2                        @ h+=X[i]
1464         str     r2,[sp,#10*4]
1465         eor     r2,r7,r8
1466         add     r9,r9,r0,ror#6  @ h+=Sigma1(e)
1467         and     r2,r2,r6
1468         add     r9,r9,r12                       @ h+=K256[i]
1469         eor     r2,r2,r8                        @ Ch(e,f,g)
1470         eor     r0,r10,r10,ror#11
1471         add     r9,r9,r2                        @ h+=Ch(e,f,g)
1472 #if 26==31
1473         and     r12,r12,#0xff
1474         cmp     r12,#0xf2                       @ done?
1475 #endif
1476 #if 26<15
1477 # if __ARM_ARCH__>=7
1478         ldr     r2,[r1],#4                      @ prefetch
1479 # else
1480         ldrb    r2,[r1,#3]
1481 # endif
1482         eor     r12,r10,r11                     @ a^b, b^c in next round
1483 #else
1484         ldr     r2,[sp,#12*4]           @ from future BODY_16_xx
1485         eor     r12,r10,r11                     @ a^b, b^c in next round
1486         ldr     r1,[sp,#9*4]    @ from future BODY_16_xx
1487 #endif
1488         eor     r0,r0,r10,ror#20        @ Sigma0(a)
1489         and     r3,r3,r12                       @ (b^c)&=(a^b)
1490         add     r5,r5,r9                        @ d+=h
1491         eor     r3,r3,r11                       @ Maj(a,b,c)
1492         add     r9,r9,r0,ror#2  @ h+=Sigma0(a)
1493         @ add   r9,r9,r3                        @ h+=Maj(a,b,c)
1494         @ ldr   r2,[sp,#12*4]           @ 27
1495         @ ldr   r1,[sp,#9*4]
1496         mov     r0,r2,ror#7
1497         add     r9,r9,r3                        @ h+=Maj(a,b,c) from the past
1498         mov     r3,r1,ror#17
1499         eor     r0,r0,r2,ror#18
1500         eor     r3,r3,r1,ror#19
1501         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1502         ldr     r2,[sp,#11*4]
1503         eor     r3,r3,r1,lsr#10 @ sigma1(X[i+14])
1504         ldr     r1,[sp,#4*4]
1506         add     r3,r3,r0
1507         eor     r0,r5,r5,ror#5  @ from BODY_00_15
1508         add     r2,r2,r3
1509         eor     r0,r0,r5,ror#19 @ Sigma1(e)
1510         add     r2,r2,r1                        @ X[i]
1511         ldr     r3,[r14],#4                     @ *K256++
1512         add     r8,r8,r2                        @ h+=X[i]
1513         str     r2,[sp,#11*4]
1514         eor     r2,r6,r7
1515         add     r8,r8,r0,ror#6  @ h+=Sigma1(e)
1516         and     r2,r2,r5
1517         add     r8,r8,r3                        @ h+=K256[i]
1518         eor     r2,r2,r7                        @ Ch(e,f,g)
1519         eor     r0,r9,r9,ror#11
1520         add     r8,r8,r2                        @ h+=Ch(e,f,g)
1521 #if 27==31
1522         and     r3,r3,#0xff
1523         cmp     r3,#0xf2                        @ done?
1524 #endif
1525 #if 27<15
1526 # if __ARM_ARCH__>=7
1527         ldr     r2,[r1],#4                      @ prefetch
1528 # else
1529         ldrb    r2,[r1,#3]
1530 # endif
1531         eor     r3,r9,r10                       @ a^b, b^c in next round
1532 #else
1533         ldr     r2,[sp,#13*4]           @ from future BODY_16_xx
1534         eor     r3,r9,r10                       @ a^b, b^c in next round
1535         ldr     r1,[sp,#10*4]   @ from future BODY_16_xx
1536 #endif
1537         eor     r0,r0,r9,ror#20 @ Sigma0(a)
1538         and     r12,r12,r3                      @ (b^c)&=(a^b)
1539         add     r4,r4,r8                        @ d+=h
1540         eor     r12,r12,r10                     @ Maj(a,b,c)
1541         add     r8,r8,r0,ror#2  @ h+=Sigma0(a)
1542         @ add   r8,r8,r12                       @ h+=Maj(a,b,c)
1543         @ ldr   r2,[sp,#13*4]           @ 28
1544         @ ldr   r1,[sp,#10*4]
1545         mov     r0,r2,ror#7
1546         add     r8,r8,r12                       @ h+=Maj(a,b,c) from the past
1547         mov     r12,r1,ror#17
1548         eor     r0,r0,r2,ror#18
1549         eor     r12,r12,r1,ror#19
1550         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1551         ldr     r2,[sp,#12*4]
1552         eor     r12,r12,r1,lsr#10       @ sigma1(X[i+14])
1553         ldr     r1,[sp,#5*4]
1555         add     r12,r12,r0
1556         eor     r0,r4,r4,ror#5  @ from BODY_00_15
1557         add     r2,r2,r12
1558         eor     r0,r0,r4,ror#19 @ Sigma1(e)
1559         add     r2,r2,r1                        @ X[i]
1560         ldr     r12,[r14],#4                    @ *K256++
1561         add     r7,r7,r2                        @ h+=X[i]
1562         str     r2,[sp,#12*4]
1563         eor     r2,r5,r6
1564         add     r7,r7,r0,ror#6  @ h+=Sigma1(e)
1565         and     r2,r2,r4
1566         add     r7,r7,r12                       @ h+=K256[i]
1567         eor     r2,r2,r6                        @ Ch(e,f,g)
1568         eor     r0,r8,r8,ror#11
1569         add     r7,r7,r2                        @ h+=Ch(e,f,g)
1570 #if 28==31
1571         and     r12,r12,#0xff
1572         cmp     r12,#0xf2                       @ done?
1573 #endif
1574 #if 28<15
1575 # if __ARM_ARCH__>=7
1576         ldr     r2,[r1],#4                      @ prefetch
1577 # else
1578         ldrb    r2,[r1,#3]
1579 # endif
1580         eor     r12,r8,r9                       @ a^b, b^c in next round
1581 #else
1582         ldr     r2,[sp,#14*4]           @ from future BODY_16_xx
1583         eor     r12,r8,r9                       @ a^b, b^c in next round
1584         ldr     r1,[sp,#11*4]   @ from future BODY_16_xx
1585 #endif
1586         eor     r0,r0,r8,ror#20 @ Sigma0(a)
1587         and     r3,r3,r12                       @ (b^c)&=(a^b)
1588         add     r11,r11,r7                      @ d+=h
1589         eor     r3,r3,r9                        @ Maj(a,b,c)
1590         add     r7,r7,r0,ror#2  @ h+=Sigma0(a)
1591         @ add   r7,r7,r3                        @ h+=Maj(a,b,c)
1592         @ ldr   r2,[sp,#14*4]           @ 29
1593         @ ldr   r1,[sp,#11*4]
1594         mov     r0,r2,ror#7
1595         add     r7,r7,r3                        @ h+=Maj(a,b,c) from the past
1596         mov     r3,r1,ror#17
1597         eor     r0,r0,r2,ror#18
1598         eor     r3,r3,r1,ror#19
1599         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1600         ldr     r2,[sp,#13*4]
1601         eor     r3,r3,r1,lsr#10 @ sigma1(X[i+14])
1602         ldr     r1,[sp,#6*4]
1604         add     r3,r3,r0
1605         eor     r0,r11,r11,ror#5        @ from BODY_00_15
1606         add     r2,r2,r3
1607         eor     r0,r0,r11,ror#19        @ Sigma1(e)
1608         add     r2,r2,r1                        @ X[i]
1609         ldr     r3,[r14],#4                     @ *K256++
1610         add     r6,r6,r2                        @ h+=X[i]
1611         str     r2,[sp,#13*4]
1612         eor     r2,r4,r5
1613         add     r6,r6,r0,ror#6  @ h+=Sigma1(e)
1614         and     r2,r2,r11
1615         add     r6,r6,r3                        @ h+=K256[i]
1616         eor     r2,r2,r5                        @ Ch(e,f,g)
1617         eor     r0,r7,r7,ror#11
1618         add     r6,r6,r2                        @ h+=Ch(e,f,g)
1619 #if 29==31
1620         and     r3,r3,#0xff
1621         cmp     r3,#0xf2                        @ done?
1622 #endif
1623 #if 29<15
1624 # if __ARM_ARCH__>=7
1625         ldr     r2,[r1],#4                      @ prefetch
1626 # else
1627         ldrb    r2,[r1,#3]
1628 # endif
1629         eor     r3,r7,r8                        @ a^b, b^c in next round
1630 #else
1631         ldr     r2,[sp,#15*4]           @ from future BODY_16_xx
1632         eor     r3,r7,r8                        @ a^b, b^c in next round
1633         ldr     r1,[sp,#12*4]   @ from future BODY_16_xx
1634 #endif
1635         eor     r0,r0,r7,ror#20 @ Sigma0(a)
1636         and     r12,r12,r3                      @ (b^c)&=(a^b)
1637         add     r10,r10,r6                      @ d+=h
1638         eor     r12,r12,r8                      @ Maj(a,b,c)
1639         add     r6,r6,r0,ror#2  @ h+=Sigma0(a)
1640         @ add   r6,r6,r12                       @ h+=Maj(a,b,c)
1641         @ ldr   r2,[sp,#15*4]           @ 30
1642         @ ldr   r1,[sp,#12*4]
1643         mov     r0,r2,ror#7
1644         add     r6,r6,r12                       @ h+=Maj(a,b,c) from the past
1645         mov     r12,r1,ror#17
1646         eor     r0,r0,r2,ror#18
1647         eor     r12,r12,r1,ror#19
1648         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1649         ldr     r2,[sp,#14*4]
1650         eor     r12,r12,r1,lsr#10       @ sigma1(X[i+14])
1651         ldr     r1,[sp,#7*4]
1653         add     r12,r12,r0
1654         eor     r0,r10,r10,ror#5        @ from BODY_00_15
1655         add     r2,r2,r12
1656         eor     r0,r0,r10,ror#19        @ Sigma1(e)
1657         add     r2,r2,r1                        @ X[i]
1658         ldr     r12,[r14],#4                    @ *K256++
1659         add     r5,r5,r2                        @ h+=X[i]
1660         str     r2,[sp,#14*4]
1661         eor     r2,r11,r4
1662         add     r5,r5,r0,ror#6  @ h+=Sigma1(e)
1663         and     r2,r2,r10
1664         add     r5,r5,r12                       @ h+=K256[i]
1665         eor     r2,r2,r4                        @ Ch(e,f,g)
1666         eor     r0,r6,r6,ror#11
1667         add     r5,r5,r2                        @ h+=Ch(e,f,g)
1668 #if 30==31
1669         and     r12,r12,#0xff
1670         cmp     r12,#0xf2                       @ done?
1671 #endif
1672 #if 30<15
1673 # if __ARM_ARCH__>=7
1674         ldr     r2,[r1],#4                      @ prefetch
1675 # else
1676         ldrb    r2,[r1,#3]
1677 # endif
1678         eor     r12,r6,r7                       @ a^b, b^c in next round
1679 #else
1680         ldr     r2,[sp,#0*4]            @ from future BODY_16_xx
1681         eor     r12,r6,r7                       @ a^b, b^c in next round
1682         ldr     r1,[sp,#13*4]   @ from future BODY_16_xx
1683 #endif
1684         eor     r0,r0,r6,ror#20 @ Sigma0(a)
1685         and     r3,r3,r12                       @ (b^c)&=(a^b)
1686         add     r9,r9,r5                        @ d+=h
1687         eor     r3,r3,r7                        @ Maj(a,b,c)
1688         add     r5,r5,r0,ror#2  @ h+=Sigma0(a)
1689         @ add   r5,r5,r3                        @ h+=Maj(a,b,c)
1690         @ ldr   r2,[sp,#0*4]            @ 31
1691         @ ldr   r1,[sp,#13*4]
1692         mov     r0,r2,ror#7
1693         add     r5,r5,r3                        @ h+=Maj(a,b,c) from the past
1694         mov     r3,r1,ror#17
1695         eor     r0,r0,r2,ror#18
1696         eor     r3,r3,r1,ror#19
1697         eor     r0,r0,r2,lsr#3  @ sigma0(X[i+1])
1698         ldr     r2,[sp,#15*4]
1699         eor     r3,r3,r1,lsr#10 @ sigma1(X[i+14])
1700         ldr     r1,[sp,#8*4]
1702         add     r3,r3,r0
1703         eor     r0,r9,r9,ror#5  @ from BODY_00_15
1704         add     r2,r2,r3
1705         eor     r0,r0,r9,ror#19 @ Sigma1(e)
1706         add     r2,r2,r1                        @ X[i]
1707         ldr     r3,[r14],#4                     @ *K256++
1708         add     r4,r4,r2                        @ h+=X[i]
1709         str     r2,[sp,#15*4]
1710         eor     r2,r10,r11
1711         add     r4,r4,r0,ror#6  @ h+=Sigma1(e)
1712         and     r2,r2,r9
1713         add     r4,r4,r3                        @ h+=K256[i]
1714         eor     r2,r2,r11                       @ Ch(e,f,g)
1715         eor     r0,r5,r5,ror#11
1716         add     r4,r4,r2                        @ h+=Ch(e,f,g)
1717 #if 31==31
1718         and     r3,r3,#0xff
1719         cmp     r3,#0xf2                        @ done?
1720 #endif
1721 #if 31<15
1722 # if __ARM_ARCH__>=7
1723         ldr     r2,[r1],#4                      @ prefetch
1724 # else
1725         ldrb    r2,[r1,#3]
1726 # endif
1727         eor     r3,r5,r6                        @ a^b, b^c in next round
1728 #else
1729         ldr     r2,[sp,#1*4]            @ from future BODY_16_xx
1730         eor     r3,r5,r6                        @ a^b, b^c in next round
1731         ldr     r1,[sp,#14*4]   @ from future BODY_16_xx
1732 #endif
1733         eor     r0,r0,r5,ror#20 @ Sigma0(a)
1734         and     r12,r12,r3                      @ (b^c)&=(a^b)
1735         add     r8,r8,r4                        @ d+=h
1736         eor     r12,r12,r6                      @ Maj(a,b,c)
1737         add     r4,r4,r0,ror#2  @ h+=Sigma0(a)
1738         @ add   r4,r4,r12                       @ h+=Maj(a,b,c)
1739         ldreq   r3,[sp,#16*4]           @ pull ctx
1740         bne     .Lrounds_16_xx
1742         add     r4,r4,r12               @ h+=Maj(a,b,c) from the past
1743         ldr     r0,[r3,#0]
1744         ldr     r2,[r3,#4]
1745         ldr     r12,[r3,#8]
1746         add     r4,r4,r0
1747         ldr     r0,[r3,#12]
1748         add     r5,r5,r2
1749         ldr     r2,[r3,#16]
1750         add     r6,r6,r12
1751         ldr     r12,[r3,#20]
1752         add     r7,r7,r0
1753         ldr     r0,[r3,#24]
1754         add     r8,r8,r2
1755         ldr     r2,[r3,#28]
1756         add     r9,r9,r12
1757         ldr     r1,[sp,#17*4]           @ pull inp
1758         ldr     r12,[sp,#18*4]          @ pull inp+len
1759         add     r10,r10,r0
1760         add     r11,r11,r2
1761         stmia   r3,{r4,r5,r6,r7,r8,r9,r10,r11}
1762         cmp     r1,r12
1763         sub     r14,r14,#256    @ rewind Ktbl
1764         bne     .Loop
1766         add     sp,sp,#19*4     @ destroy frame
1767 #if __ARM_ARCH__>=5
1768         ldmia   sp!,{r4-r11,pc}
1769 #else
1770         ldmia   sp!,{r4-r11,lr}
1771         tst     lr,#1
1772         moveq   pc,lr                   @ be binary compatible with V4, yet
1773         .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
1774 #endif
1775 .size   sha256_block_data_order,.-sha256_block_data_order
1776 #if __ARM_MAX_ARCH__>=7
1777 .arch   armv7-a
1778 .fpu    neon
1780 .type   sha256_block_data_order_neon,%function
1781 .align  4
1782 sha256_block_data_order_neon:
1783 .LNEON:
1784         stmdb   sp!,{r4-r12,lr}
1786         mov     r12,sp
1787         sub     sp,sp,#16*4+16          @ alloca
1788         sub     r14,r3,#256+32  @ K256
1789         bic     sp,sp,#15               @ align for 128-bit stores
1791         vld1.8          {q0},[r1]!
1792         vld1.8          {q1},[r1]!
1793         vld1.8          {q2},[r1]!
1794         vld1.8          {q3},[r1]!
1795         vld1.32         {q8},[r14,:128]!
1796         vld1.32         {q9},[r14,:128]!
1797         vld1.32         {q10},[r14,:128]!
1798         vld1.32         {q11},[r14,:128]!
1799         vrev32.8        q0,q0           @ yes, even on
1800         str             r0,[sp,#64]
1801         vrev32.8        q1,q1           @ big-endian
1802         str             r1,[sp,#68]
1803         mov             r1,sp
1804         vrev32.8        q2,q2
1805         str             r2,[sp,#72]
1806         vrev32.8        q3,q3
1807         str             r12,[sp,#76]            @ save original sp
1808         vadd.i32        q8,q8,q0
1809         vadd.i32        q9,q9,q1
1810         vst1.32         {q8},[r1,:128]!
1811         vadd.i32        q10,q10,q2
1812         vst1.32         {q9},[r1,:128]!
1813         vadd.i32        q11,q11,q3
1814         vst1.32         {q10},[r1,:128]!
1815         vst1.32         {q11},[r1,:128]!
1817         ldmia           r0,{r4-r11}
1818         sub             r1,r1,#64
1819         ldr             r2,[sp,#0]
1820         eor             r12,r12,r12
1821         eor             r3,r5,r6
1822         b               .L_00_48
1824 .align  4
1825 .L_00_48:
1826         vext.8  q8,q0,q1,#4
1827         add     r11,r11,r2
1828         eor     r2,r9,r10
1829         eor     r0,r8,r8,ror#5
1830         vext.8  q9,q2,q3,#4
1831         add     r4,r4,r12
1832         and     r2,r2,r8
1833         eor     r12,r0,r8,ror#19
1834         vshr.u32        q10,q8,#7
1835         eor     r0,r4,r4,ror#11
1836         eor     r2,r2,r10
1837         vadd.i32        q0,q0,q9
1838         add     r11,r11,r12,ror#6
1839         eor     r12,r4,r5
1840         vshr.u32        q9,q8,#3
1841         eor     r0,r0,r4,ror#20
1842         add     r11,r11,r2
1843         vsli.32 q10,q8,#25
1844         ldr     r2,[sp,#4]
1845         and     r3,r3,r12
1846         vshr.u32        q11,q8,#18
1847         add     r7,r7,r11
1848         add     r11,r11,r0,ror#2
1849         eor     r3,r3,r5
1850         veor    q9,q9,q10
1851         add     r10,r10,r2
1852         vsli.32 q11,q8,#14
1853         eor     r2,r8,r9
1854         eor     r0,r7,r7,ror#5
1855         vshr.u32        d24,d7,#17
1856         add     r11,r11,r3
1857         and     r2,r2,r7
1858         veor    q9,q9,q11
1859         eor     r3,r0,r7,ror#19
1860         eor     r0,r11,r11,ror#11
1861         vsli.32 d24,d7,#15
1862         eor     r2,r2,r9
1863         add     r10,r10,r3,ror#6
1864         vshr.u32        d25,d7,#10
1865         eor     r3,r11,r4
1866         eor     r0,r0,r11,ror#20
1867         vadd.i32        q0,q0,q9
1868         add     r10,r10,r2
1869         ldr     r2,[sp,#8]
1870         veor    d25,d25,d24
1871         and     r12,r12,r3
1872         add     r6,r6,r10
1873         vshr.u32        d24,d7,#19
1874         add     r10,r10,r0,ror#2
1875         eor     r12,r12,r4
1876         vsli.32 d24,d7,#13
1877         add     r9,r9,r2
1878         eor     r2,r7,r8
1879         veor    d25,d25,d24
1880         eor     r0,r6,r6,ror#5
1881         add     r10,r10,r12
1882         vadd.i32        d0,d0,d25
1883         and     r2,r2,r6
1884         eor     r12,r0,r6,ror#19
1885         vshr.u32        d24,d0,#17
1886         eor     r0,r10,r10,ror#11
1887         eor     r2,r2,r8
1888         vsli.32 d24,d0,#15
1889         add     r9,r9,r12,ror#6
1890         eor     r12,r10,r11
1891         vshr.u32        d25,d0,#10
1892         eor     r0,r0,r10,ror#20
1893         add     r9,r9,r2
1894         veor    d25,d25,d24
1895         ldr     r2,[sp,#12]
1896         and     r3,r3,r12
1897         vshr.u32        d24,d0,#19
1898         add     r5,r5,r9
1899         add     r9,r9,r0,ror#2
1900         eor     r3,r3,r11
1901         vld1.32 {q8},[r14,:128]!
1902         add     r8,r8,r2
1903         vsli.32 d24,d0,#13
1904         eor     r2,r6,r7
1905         eor     r0,r5,r5,ror#5
1906         veor    d25,d25,d24
1907         add     r9,r9,r3
1908         and     r2,r2,r5
1909         vadd.i32        d1,d1,d25
1910         eor     r3,r0,r5,ror#19
1911         eor     r0,r9,r9,ror#11
1912         vadd.i32        q8,q8,q0
1913         eor     r2,r2,r7
1914         add     r8,r8,r3,ror#6
1915         eor     r3,r9,r10
1916         eor     r0,r0,r9,ror#20
1917         add     r8,r8,r2
1918         ldr     r2,[sp,#16]
1919         and     r12,r12,r3
1920         add     r4,r4,r8
1921         vst1.32 {q8},[r1,:128]!
1922         add     r8,r8,r0,ror#2
1923         eor     r12,r12,r10
1924         vext.8  q8,q1,q2,#4
1925         add     r7,r7,r2
1926         eor     r2,r5,r6
1927         eor     r0,r4,r4,ror#5
1928         vext.8  q9,q3,q0,#4
1929         add     r8,r8,r12
1930         and     r2,r2,r4
1931         eor     r12,r0,r4,ror#19
1932         vshr.u32        q10,q8,#7
1933         eor     r0,r8,r8,ror#11
1934         eor     r2,r2,r6
1935         vadd.i32        q1,q1,q9
1936         add     r7,r7,r12,ror#6
1937         eor     r12,r8,r9
1938         vshr.u32        q9,q8,#3
1939         eor     r0,r0,r8,ror#20
1940         add     r7,r7,r2
1941         vsli.32 q10,q8,#25
1942         ldr     r2,[sp,#20]
1943         and     r3,r3,r12
1944         vshr.u32        q11,q8,#18
1945         add     r11,r11,r7
1946         add     r7,r7,r0,ror#2
1947         eor     r3,r3,r9
1948         veor    q9,q9,q10
1949         add     r6,r6,r2
1950         vsli.32 q11,q8,#14
1951         eor     r2,r4,r5
1952         eor     r0,r11,r11,ror#5
1953         vshr.u32        d24,d1,#17
1954         add     r7,r7,r3
1955         and     r2,r2,r11
1956         veor    q9,q9,q11
1957         eor     r3,r0,r11,ror#19
1958         eor     r0,r7,r7,ror#11
1959         vsli.32 d24,d1,#15
1960         eor     r2,r2,r5
1961         add     r6,r6,r3,ror#6
1962         vshr.u32        d25,d1,#10
1963         eor     r3,r7,r8
1964         eor     r0,r0,r7,ror#20
1965         vadd.i32        q1,q1,q9
1966         add     r6,r6,r2
1967         ldr     r2,[sp,#24]
1968         veor    d25,d25,d24
1969         and     r12,r12,r3
1970         add     r10,r10,r6
1971         vshr.u32        d24,d1,#19
1972         add     r6,r6,r0,ror#2
1973         eor     r12,r12,r8
1974         vsli.32 d24,d1,#13
1975         add     r5,r5,r2
1976         eor     r2,r11,r4
1977         veor    d25,d25,d24
1978         eor     r0,r10,r10,ror#5
1979         add     r6,r6,r12
1980         vadd.i32        d2,d2,d25
1981         and     r2,r2,r10
1982         eor     r12,r0,r10,ror#19
1983         vshr.u32        d24,d2,#17
1984         eor     r0,r6,r6,ror#11
1985         eor     r2,r2,r4
1986         vsli.32 d24,d2,#15
1987         add     r5,r5,r12,ror#6
1988         eor     r12,r6,r7
1989         vshr.u32        d25,d2,#10
1990         eor     r0,r0,r6,ror#20
1991         add     r5,r5,r2
1992         veor    d25,d25,d24
1993         ldr     r2,[sp,#28]
1994         and     r3,r3,r12
1995         vshr.u32        d24,d2,#19
1996         add     r9,r9,r5
1997         add     r5,r5,r0,ror#2
1998         eor     r3,r3,r7
1999         vld1.32 {q8},[r14,:128]!
2000         add     r4,r4,r2
2001         vsli.32 d24,d2,#13
2002         eor     r2,r10,r11
2003         eor     r0,r9,r9,ror#5
2004         veor    d25,d25,d24
2005         add     r5,r5,r3
2006         and     r2,r2,r9
2007         vadd.i32        d3,d3,d25
2008         eor     r3,r0,r9,ror#19
2009         eor     r0,r5,r5,ror#11
2010         vadd.i32        q8,q8,q1
2011         eor     r2,r2,r11
2012         add     r4,r4,r3,ror#6
2013         eor     r3,r5,r6
2014         eor     r0,r0,r5,ror#20
2015         add     r4,r4,r2
2016         ldr     r2,[sp,#32]
2017         and     r12,r12,r3
2018         add     r8,r8,r4
2019         vst1.32 {q8},[r1,:128]!
2020         add     r4,r4,r0,ror#2
2021         eor     r12,r12,r6
2022         vext.8  q8,q2,q3,#4
2023         add     r11,r11,r2
2024         eor     r2,r9,r10
2025         eor     r0,r8,r8,ror#5
2026         vext.8  q9,q0,q1,#4
2027         add     r4,r4,r12
2028         and     r2,r2,r8
2029         eor     r12,r0,r8,ror#19
2030         vshr.u32        q10,q8,#7
2031         eor     r0,r4,r4,ror#11
2032         eor     r2,r2,r10
2033         vadd.i32        q2,q2,q9
2034         add     r11,r11,r12,ror#6
2035         eor     r12,r4,r5
2036         vshr.u32        q9,q8,#3
2037         eor     r0,r0,r4,ror#20
2038         add     r11,r11,r2
2039         vsli.32 q10,q8,#25
2040         ldr     r2,[sp,#36]
2041         and     r3,r3,r12
2042         vshr.u32        q11,q8,#18
2043         add     r7,r7,r11
2044         add     r11,r11,r0,ror#2
2045         eor     r3,r3,r5
2046         veor    q9,q9,q10
2047         add     r10,r10,r2
2048         vsli.32 q11,q8,#14
2049         eor     r2,r8,r9
2050         eor     r0,r7,r7,ror#5
2051         vshr.u32        d24,d3,#17
2052         add     r11,r11,r3
2053         and     r2,r2,r7
2054         veor    q9,q9,q11
2055         eor     r3,r0,r7,ror#19
2056         eor     r0,r11,r11,ror#11
2057         vsli.32 d24,d3,#15
2058         eor     r2,r2,r9
2059         add     r10,r10,r3,ror#6
2060         vshr.u32        d25,d3,#10
2061         eor     r3,r11,r4
2062         eor     r0,r0,r11,ror#20
2063         vadd.i32        q2,q2,q9
2064         add     r10,r10,r2
2065         ldr     r2,[sp,#40]
2066         veor    d25,d25,d24
2067         and     r12,r12,r3
2068         add     r6,r6,r10
2069         vshr.u32        d24,d3,#19
2070         add     r10,r10,r0,ror#2
2071         eor     r12,r12,r4
2072         vsli.32 d24,d3,#13
2073         add     r9,r9,r2
2074         eor     r2,r7,r8
2075         veor    d25,d25,d24
2076         eor     r0,r6,r6,ror#5
2077         add     r10,r10,r12
2078         vadd.i32        d4,d4,d25
2079         and     r2,r2,r6
2080         eor     r12,r0,r6,ror#19
2081         vshr.u32        d24,d4,#17
2082         eor     r0,r10,r10,ror#11
2083         eor     r2,r2,r8
2084         vsli.32 d24,d4,#15
2085         add     r9,r9,r12,ror#6
2086         eor     r12,r10,r11
2087         vshr.u32        d25,d4,#10
2088         eor     r0,r0,r10,ror#20
2089         add     r9,r9,r2
2090         veor    d25,d25,d24
2091         ldr     r2,[sp,#44]
2092         and     r3,r3,r12
2093         vshr.u32        d24,d4,#19
2094         add     r5,r5,r9
2095         add     r9,r9,r0,ror#2
2096         eor     r3,r3,r11
2097         vld1.32 {q8},[r14,:128]!
2098         add     r8,r8,r2
2099         vsli.32 d24,d4,#13
2100         eor     r2,r6,r7
2101         eor     r0,r5,r5,ror#5
2102         veor    d25,d25,d24
2103         add     r9,r9,r3
2104         and     r2,r2,r5
2105         vadd.i32        d5,d5,d25
2106         eor     r3,r0,r5,ror#19
2107         eor     r0,r9,r9,ror#11
2108         vadd.i32        q8,q8,q2
2109         eor     r2,r2,r7
2110         add     r8,r8,r3,ror#6
2111         eor     r3,r9,r10
2112         eor     r0,r0,r9,ror#20
2113         add     r8,r8,r2
2114         ldr     r2,[sp,#48]
2115         and     r12,r12,r3
2116         add     r4,r4,r8
2117         vst1.32 {q8},[r1,:128]!
2118         add     r8,r8,r0,ror#2
2119         eor     r12,r12,r10
2120         vext.8  q8,q3,q0,#4
2121         add     r7,r7,r2
2122         eor     r2,r5,r6
2123         eor     r0,r4,r4,ror#5
2124         vext.8  q9,q1,q2,#4
2125         add     r8,r8,r12
2126         and     r2,r2,r4
2127         eor     r12,r0,r4,ror#19
2128         vshr.u32        q10,q8,#7
2129         eor     r0,r8,r8,ror#11
2130         eor     r2,r2,r6
2131         vadd.i32        q3,q3,q9
2132         add     r7,r7,r12,ror#6
2133         eor     r12,r8,r9
2134         vshr.u32        q9,q8,#3
2135         eor     r0,r0,r8,ror#20
2136         add     r7,r7,r2
2137         vsli.32 q10,q8,#25
2138         ldr     r2,[sp,#52]
2139         and     r3,r3,r12
2140         vshr.u32        q11,q8,#18
2141         add     r11,r11,r7
2142         add     r7,r7,r0,ror#2
2143         eor     r3,r3,r9
2144         veor    q9,q9,q10
2145         add     r6,r6,r2
2146         vsli.32 q11,q8,#14
2147         eor     r2,r4,r5
2148         eor     r0,r11,r11,ror#5
2149         vshr.u32        d24,d5,#17
2150         add     r7,r7,r3
2151         and     r2,r2,r11
2152         veor    q9,q9,q11
2153         eor     r3,r0,r11,ror#19
2154         eor     r0,r7,r7,ror#11
2155         vsli.32 d24,d5,#15
2156         eor     r2,r2,r5
2157         add     r6,r6,r3,ror#6
2158         vshr.u32        d25,d5,#10
2159         eor     r3,r7,r8
2160         eor     r0,r0,r7,ror#20
2161         vadd.i32        q3,q3,q9
2162         add     r6,r6,r2
2163         ldr     r2,[sp,#56]
2164         veor    d25,d25,d24
2165         and     r12,r12,r3
2166         add     r10,r10,r6
2167         vshr.u32        d24,d5,#19
2168         add     r6,r6,r0,ror#2
2169         eor     r12,r12,r8
2170         vsli.32 d24,d5,#13
2171         add     r5,r5,r2
2172         eor     r2,r11,r4
2173         veor    d25,d25,d24
2174         eor     r0,r10,r10,ror#5
2175         add     r6,r6,r12
2176         vadd.i32        d6,d6,d25
2177         and     r2,r2,r10
2178         eor     r12,r0,r10,ror#19
2179         vshr.u32        d24,d6,#17
2180         eor     r0,r6,r6,ror#11
2181         eor     r2,r2,r4
2182         vsli.32 d24,d6,#15
2183         add     r5,r5,r12,ror#6
2184         eor     r12,r6,r7
2185         vshr.u32        d25,d6,#10
2186         eor     r0,r0,r6,ror#20
2187         add     r5,r5,r2
2188         veor    d25,d25,d24
2189         ldr     r2,[sp,#60]
2190         and     r3,r3,r12
2191         vshr.u32        d24,d6,#19
2192         add     r9,r9,r5
2193         add     r5,r5,r0,ror#2
2194         eor     r3,r3,r7
2195         vld1.32 {q8},[r14,:128]!
2196         add     r4,r4,r2
2197         vsli.32 d24,d6,#13
2198         eor     r2,r10,r11
2199         eor     r0,r9,r9,ror#5
2200         veor    d25,d25,d24
2201         add     r5,r5,r3
2202         and     r2,r2,r9
2203         vadd.i32        d7,d7,d25
2204         eor     r3,r0,r9,ror#19
2205         eor     r0,r5,r5,ror#11
2206         vadd.i32        q8,q8,q3
2207         eor     r2,r2,r11
2208         add     r4,r4,r3,ror#6
2209         eor     r3,r5,r6
2210         eor     r0,r0,r5,ror#20
2211         add     r4,r4,r2
2212         ldr     r2,[r14]
2213         and     r12,r12,r3
2214         add     r8,r8,r4
2215         vst1.32 {q8},[r1,:128]!
2216         add     r4,r4,r0,ror#2
2217         eor     r12,r12,r6
2218         teq     r2,#0                           @ check for K256 terminator
2219         ldr     r2,[sp,#0]
2220         sub     r1,r1,#64
2221         bne     .L_00_48
2223         ldr             r1,[sp,#68]
2224         ldr             r0,[sp,#72]
2225         sub             r14,r14,#256    @ rewind r14
2226         teq             r1,r0
2227         subeq           r1,r1,#64               @ avoid SEGV
2228         vld1.8          {q0},[r1]!              @ load next input block
2229         vld1.8          {q1},[r1]!
2230         vld1.8          {q2},[r1]!
2231         vld1.8          {q3},[r1]!
2232         strne           r1,[sp,#68]
2233         mov             r1,sp
2234         add     r11,r11,r2
2235         eor     r2,r9,r10
2236         eor     r0,r8,r8,ror#5
2237         add     r4,r4,r12
2238         vld1.32 {q8},[r14,:128]!
2239         and     r2,r2,r8
2240         eor     r12,r0,r8,ror#19
2241         eor     r0,r4,r4,ror#11
2242         eor     r2,r2,r10
2243         vrev32.8        q0,q0
2244         add     r11,r11,r12,ror#6
2245         eor     r12,r4,r5
2246         eor     r0,r0,r4,ror#20
2247         add     r11,r11,r2
2248         vadd.i32        q8,q8,q0
2249         ldr     r2,[sp,#4]
2250         and     r3,r3,r12
2251         add     r7,r7,r11
2252         add     r11,r11,r0,ror#2
2253         eor     r3,r3,r5
2254         add     r10,r10,r2
2255         eor     r2,r8,r9
2256         eor     r0,r7,r7,ror#5
2257         add     r11,r11,r3
2258         and     r2,r2,r7
2259         eor     r3,r0,r7,ror#19
2260         eor     r0,r11,r11,ror#11
2261         eor     r2,r2,r9
2262         add     r10,r10,r3,ror#6
2263         eor     r3,r11,r4
2264         eor     r0,r0,r11,ror#20
2265         add     r10,r10,r2
2266         ldr     r2,[sp,#8]
2267         and     r12,r12,r3
2268         add     r6,r6,r10
2269         add     r10,r10,r0,ror#2
2270         eor     r12,r12,r4
2271         add     r9,r9,r2
2272         eor     r2,r7,r8
2273         eor     r0,r6,r6,ror#5
2274         add     r10,r10,r12
2275         and     r2,r2,r6
2276         eor     r12,r0,r6,ror#19
2277         eor     r0,r10,r10,ror#11
2278         eor     r2,r2,r8
2279         add     r9,r9,r12,ror#6
2280         eor     r12,r10,r11
2281         eor     r0,r0,r10,ror#20
2282         add     r9,r9,r2
2283         ldr     r2,[sp,#12]
2284         and     r3,r3,r12
2285         add     r5,r5,r9
2286         add     r9,r9,r0,ror#2
2287         eor     r3,r3,r11
2288         add     r8,r8,r2
2289         eor     r2,r6,r7
2290         eor     r0,r5,r5,ror#5
2291         add     r9,r9,r3
2292         and     r2,r2,r5
2293         eor     r3,r0,r5,ror#19
2294         eor     r0,r9,r9,ror#11
2295         eor     r2,r2,r7
2296         add     r8,r8,r3,ror#6
2297         eor     r3,r9,r10
2298         eor     r0,r0,r9,ror#20
2299         add     r8,r8,r2
2300         ldr     r2,[sp,#16]
2301         and     r12,r12,r3
2302         add     r4,r4,r8
2303         add     r8,r8,r0,ror#2
2304         eor     r12,r12,r10
2305         vst1.32 {q8},[r1,:128]!
2306         add     r7,r7,r2
2307         eor     r2,r5,r6
2308         eor     r0,r4,r4,ror#5
2309         add     r8,r8,r12
2310         vld1.32 {q8},[r14,:128]!
2311         and     r2,r2,r4
2312         eor     r12,r0,r4,ror#19
2313         eor     r0,r8,r8,ror#11
2314         eor     r2,r2,r6
2315         vrev32.8        q1,q1
2316         add     r7,r7,r12,ror#6
2317         eor     r12,r8,r9
2318         eor     r0,r0,r8,ror#20
2319         add     r7,r7,r2
2320         vadd.i32        q8,q8,q1
2321         ldr     r2,[sp,#20]
2322         and     r3,r3,r12
2323         add     r11,r11,r7
2324         add     r7,r7,r0,ror#2
2325         eor     r3,r3,r9
2326         add     r6,r6,r2
2327         eor     r2,r4,r5
2328         eor     r0,r11,r11,ror#5
2329         add     r7,r7,r3
2330         and     r2,r2,r11
2331         eor     r3,r0,r11,ror#19
2332         eor     r0,r7,r7,ror#11
2333         eor     r2,r2,r5
2334         add     r6,r6,r3,ror#6
2335         eor     r3,r7,r8
2336         eor     r0,r0,r7,ror#20
2337         add     r6,r6,r2
2338         ldr     r2,[sp,#24]
2339         and     r12,r12,r3
2340         add     r10,r10,r6
2341         add     r6,r6,r0,ror#2
2342         eor     r12,r12,r8
2343         add     r5,r5,r2
2344         eor     r2,r11,r4
2345         eor     r0,r10,r10,ror#5
2346         add     r6,r6,r12
2347         and     r2,r2,r10
2348         eor     r12,r0,r10,ror#19
2349         eor     r0,r6,r6,ror#11
2350         eor     r2,r2,r4
2351         add     r5,r5,r12,ror#6
2352         eor     r12,r6,r7
2353         eor     r0,r0,r6,ror#20
2354         add     r5,r5,r2
2355         ldr     r2,[sp,#28]
2356         and     r3,r3,r12
2357         add     r9,r9,r5
2358         add     r5,r5,r0,ror#2
2359         eor     r3,r3,r7
2360         add     r4,r4,r2
2361         eor     r2,r10,r11
2362         eor     r0,r9,r9,ror#5
2363         add     r5,r5,r3
2364         and     r2,r2,r9
2365         eor     r3,r0,r9,ror#19
2366         eor     r0,r5,r5,ror#11
2367         eor     r2,r2,r11
2368         add     r4,r4,r3,ror#6
2369         eor     r3,r5,r6
2370         eor     r0,r0,r5,ror#20
2371         add     r4,r4,r2
2372         ldr     r2,[sp,#32]
2373         and     r12,r12,r3
2374         add     r8,r8,r4
2375         add     r4,r4,r0,ror#2
2376         eor     r12,r12,r6
2377         vst1.32 {q8},[r1,:128]!
2378         add     r11,r11,r2
2379         eor     r2,r9,r10
2380         eor     r0,r8,r8,ror#5
2381         add     r4,r4,r12
2382         vld1.32 {q8},[r14,:128]!
2383         and     r2,r2,r8
2384         eor     r12,r0,r8,ror#19
2385         eor     r0,r4,r4,ror#11
2386         eor     r2,r2,r10
2387         vrev32.8        q2,q2
2388         add     r11,r11,r12,ror#6
2389         eor     r12,r4,r5
2390         eor     r0,r0,r4,ror#20
2391         add     r11,r11,r2
2392         vadd.i32        q8,q8,q2
2393         ldr     r2,[sp,#36]
2394         and     r3,r3,r12
2395         add     r7,r7,r11
2396         add     r11,r11,r0,ror#2
2397         eor     r3,r3,r5
2398         add     r10,r10,r2
2399         eor     r2,r8,r9
2400         eor     r0,r7,r7,ror#5
2401         add     r11,r11,r3
2402         and     r2,r2,r7
2403         eor     r3,r0,r7,ror#19
2404         eor     r0,r11,r11,ror#11
2405         eor     r2,r2,r9
2406         add     r10,r10,r3,ror#6
2407         eor     r3,r11,r4
2408         eor     r0,r0,r11,ror#20
2409         add     r10,r10,r2
2410         ldr     r2,[sp,#40]
2411         and     r12,r12,r3
2412         add     r6,r6,r10
2413         add     r10,r10,r0,ror#2
2414         eor     r12,r12,r4
2415         add     r9,r9,r2
2416         eor     r2,r7,r8
2417         eor     r0,r6,r6,ror#5
2418         add     r10,r10,r12
2419         and     r2,r2,r6
2420         eor     r12,r0,r6,ror#19
2421         eor     r0,r10,r10,ror#11
2422         eor     r2,r2,r8
2423         add     r9,r9,r12,ror#6
2424         eor     r12,r10,r11
2425         eor     r0,r0,r10,ror#20
2426         add     r9,r9,r2
2427         ldr     r2,[sp,#44]
2428         and     r3,r3,r12
2429         add     r5,r5,r9
2430         add     r9,r9,r0,ror#2
2431         eor     r3,r3,r11
2432         add     r8,r8,r2
2433         eor     r2,r6,r7
2434         eor     r0,r5,r5,ror#5
2435         add     r9,r9,r3
2436         and     r2,r2,r5
2437         eor     r3,r0,r5,ror#19
2438         eor     r0,r9,r9,ror#11
2439         eor     r2,r2,r7
2440         add     r8,r8,r3,ror#6
2441         eor     r3,r9,r10
2442         eor     r0,r0,r9,ror#20
2443         add     r8,r8,r2
2444         ldr     r2,[sp,#48]
2445         and     r12,r12,r3
2446         add     r4,r4,r8
2447         add     r8,r8,r0,ror#2
2448         eor     r12,r12,r10
2449         vst1.32 {q8},[r1,:128]!
2450         add     r7,r7,r2
2451         eor     r2,r5,r6
2452         eor     r0,r4,r4,ror#5
2453         add     r8,r8,r12
2454         vld1.32 {q8},[r14,:128]!
2455         and     r2,r2,r4
2456         eor     r12,r0,r4,ror#19
2457         eor     r0,r8,r8,ror#11
2458         eor     r2,r2,r6
2459         vrev32.8        q3,q3
2460         add     r7,r7,r12,ror#6
2461         eor     r12,r8,r9
2462         eor     r0,r0,r8,ror#20
2463         add     r7,r7,r2
2464         vadd.i32        q8,q8,q3
2465         ldr     r2,[sp,#52]
2466         and     r3,r3,r12
2467         add     r11,r11,r7
2468         add     r7,r7,r0,ror#2
2469         eor     r3,r3,r9
2470         add     r6,r6,r2
2471         eor     r2,r4,r5
2472         eor     r0,r11,r11,ror#5
2473         add     r7,r7,r3
2474         and     r2,r2,r11
2475         eor     r3,r0,r11,ror#19
2476         eor     r0,r7,r7,ror#11
2477         eor     r2,r2,r5
2478         add     r6,r6,r3,ror#6
2479         eor     r3,r7,r8
2480         eor     r0,r0,r7,ror#20
2481         add     r6,r6,r2
2482         ldr     r2,[sp,#56]
2483         and     r12,r12,r3
2484         add     r10,r10,r6
2485         add     r6,r6,r0,ror#2
2486         eor     r12,r12,r8
2487         add     r5,r5,r2
2488         eor     r2,r11,r4
2489         eor     r0,r10,r10,ror#5
2490         add     r6,r6,r12
2491         and     r2,r2,r10
2492         eor     r12,r0,r10,ror#19
2493         eor     r0,r6,r6,ror#11
2494         eor     r2,r2,r4
2495         add     r5,r5,r12,ror#6
2496         eor     r12,r6,r7
2497         eor     r0,r0,r6,ror#20
2498         add     r5,r5,r2
2499         ldr     r2,[sp,#60]
2500         and     r3,r3,r12
2501         add     r9,r9,r5
2502         add     r5,r5,r0,ror#2
2503         eor     r3,r3,r7
2504         add     r4,r4,r2
2505         eor     r2,r10,r11
2506         eor     r0,r9,r9,ror#5
2507         add     r5,r5,r3
2508         and     r2,r2,r9
2509         eor     r3,r0,r9,ror#19
2510         eor     r0,r5,r5,ror#11
2511         eor     r2,r2,r11
2512         add     r4,r4,r3,ror#6
2513         eor     r3,r5,r6
2514         eor     r0,r0,r5,ror#20
2515         add     r4,r4,r2
2516         ldr     r2,[sp,#64]
2517         and     r12,r12,r3
2518         add     r8,r8,r4
2519         add     r4,r4,r0,ror#2
2520         eor     r12,r12,r6
2521         vst1.32 {q8},[r1,:128]!
2522         ldr     r0,[r2,#0]
2523         add     r4,r4,r12                       @ h+=Maj(a,b,c) from the past
2524         ldr     r12,[r2,#4]
2525         ldr     r3,[r2,#8]
2526         ldr     r1,[r2,#12]
2527         add     r4,r4,r0                        @ accumulate
2528         ldr     r0,[r2,#16]
2529         add     r5,r5,r12
2530         ldr     r12,[r2,#20]
2531         add     r6,r6,r3
2532         ldr     r3,[r2,#24]
2533         add     r7,r7,r1
2534         ldr     r1,[r2,#28]
2535         add     r8,r8,r0
2536         str     r4,[r2],#4
2537         add     r9,r9,r12
2538         str     r5,[r2],#4
2539         add     r10,r10,r3
2540         str     r6,[r2],#4
2541         add     r11,r11,r1
2542         str     r7,[r2],#4
2543         stmia   r2,{r8-r11}
2545         movne   r1,sp
2546         ldrne   r2,[sp,#0]
2547         eorne   r12,r12,r12
2548         ldreq   sp,[sp,#76]                     @ restore original sp
2549         eorne   r3,r5,r6
2550         bne     .L_00_48
2552         ldmia   sp!,{r4-r12,pc}
2553 .size   sha256_block_data_order_neon,.-sha256_block_data_order_neon
2554 #endif
2555 #if __ARM_MAX_ARCH__>=7
@ ---------------------------------------------------------------------------
@ sha256_block_data_order_armv8
@
@ SHA-256 compression using the ARMv8 Cryptography Extension instructions
@ (SHA256H / SHA256H2 / SHA256SU0 / SHA256SU1).  The crypto instructions are
@ emitted as raw .byte encodings so the file still assembles with toolchains
@ that predate the ARMv8 mnemonics.  Reached via "bne .LARMv8" from
@ sha256_block_data_order when OPENSSL_armcap_P has ARMV8_SHA256 set.
@
@ In:   r0 = pointer to the 8-word hash state H[0..7]
@       r1 = pointer to the input data
@       r2 = pointer just past the end of the input (caller computed it with
@            "add r2,r1,r2,lsl#6", i.e. length was given in 64-byte blocks)
@       r3 = address of sha256_block_data_order (derived from pc on entry)
@ Out:  updated state written back to [r0]
@ Clobbers: r1, r3, q0-q2, q8-q15, flags.  No stack is used.
@ ---------------------------------------------------------------------------
2556 .type   sha256_block_data_order_armv8,%function
2557 .align  5
2558 sha256_block_data_order_armv8:
2559 .LARMv8:
@ Load the current hash state: q0/q1 = H[0..7].
2560         vld1.32 {q0,q1},[r0]
@ Convert the function address left in r3 into the address of the K256
@ round-constant table (both live in this object, fixed distance apart).
2561         sub     r3,r3,#sha256_block_data_order-K256
@ Main loop: one 64-byte input block per iteration.
2563 .Loop_v8:
@ Load 64 bytes of message into q8-q11.
2564         vld1.8          {q8-q9},[r1]!
2565         vld1.8          {q10-q11},[r1]!
2566         vld1.32         {q12},[r3]!
@ Reverse the bytes inside every 32-bit lane (SHA-256 message words are
@ big-endian in the input stream).
2567         vrev32.8        q8,q8
2568         vrev32.8        q9,q9
2569         vrev32.8        q10,q10
2570         vrev32.8        q11,q11
@ Save the incoming state so it can be added back after the 64 rounds.
2571         vmov            q14,q0  @ offload
2572         vmov            q15,q1
@ Compare input pointer with the end pointer now; the Z flag survives the
@ NEON work below and drives the "bne .Loop_v8" at the bottom.
2573         teq             r1,r2
@ Rounds run four at a time: add the round constants (q12/q13), run
@ sha256h/sha256h2 on the state, and extend the message schedule with
@ sha256su0/sha256su1 for the later rounds.
2574         vld1.32         {q13},[r3]!
2575         vadd.i32        q12,q12,q8
2576         .byte   0xe2,0x03,0xfa,0xf3     @ sha256su0 q8,q9
2577         vmov            q2,q0
2578         .byte   0x68,0x0c,0x02,0xf3     @ sha256h q0,q1,q12
2579         .byte   0x68,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q12
2580         .byte   0xe6,0x0c,0x64,0xf3     @ sha256su1 q8,q10,q11
2581         vld1.32         {q12},[r3]!
2582         vadd.i32        q13,q13,q9
2583         .byte   0xe4,0x23,0xfa,0xf3     @ sha256su0 q9,q10
2584         vmov            q2,q0
2585         .byte   0x6a,0x0c,0x02,0xf3     @ sha256h q0,q1,q13
2586         .byte   0x6a,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q13
2587         .byte   0xe0,0x2c,0x66,0xf3     @ sha256su1 q9,q11,q8
2588         vld1.32         {q13},[r3]!
2589         vadd.i32        q12,q12,q10
2590         .byte   0xe6,0x43,0xfa,0xf3     @ sha256su0 q10,q11
2591         vmov            q2,q0
2592         .byte   0x68,0x0c,0x02,0xf3     @ sha256h q0,q1,q12
2593         .byte   0x68,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q12
2594         .byte   0xe2,0x4c,0x60,0xf3     @ sha256su1 q10,q8,q9
2595         vld1.32         {q12},[r3]!
2596         vadd.i32        q13,q13,q11
2597         .byte   0xe0,0x63,0xfa,0xf3     @ sha256su0 q11,q8
2598         vmov            q2,q0
2599         .byte   0x6a,0x0c,0x02,0xf3     @ sha256h q0,q1,q13
2600         .byte   0x6a,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q13
2601         .byte   0xe4,0x6c,0x62,0xf3     @ sha256su1 q11,q9,q10
2602         vld1.32         {q13},[r3]!
2603         vadd.i32        q12,q12,q8
2604         .byte   0xe2,0x03,0xfa,0xf3     @ sha256su0 q8,q9
2605         vmov            q2,q0
2606         .byte   0x68,0x0c,0x02,0xf3     @ sha256h q0,q1,q12
2607         .byte   0x68,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q12
2608         .byte   0xe6,0x0c,0x64,0xf3     @ sha256su1 q8,q10,q11
2609         vld1.32         {q12},[r3]!
2610         vadd.i32        q13,q13,q9
2611         .byte   0xe4,0x23,0xfa,0xf3     @ sha256su0 q9,q10
2612         vmov            q2,q0
2613         .byte   0x6a,0x0c,0x02,0xf3     @ sha256h q0,q1,q13
2614         .byte   0x6a,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q13
2615         .byte   0xe0,0x2c,0x66,0xf3     @ sha256su1 q9,q11,q8
2616         vld1.32         {q13},[r3]!
2617         vadd.i32        q12,q12,q10
2618         .byte   0xe6,0x43,0xfa,0xf3     @ sha256su0 q10,q11
2619         vmov            q2,q0
2620         .byte   0x68,0x0c,0x02,0xf3     @ sha256h q0,q1,q12
2621         .byte   0x68,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q12
2622         .byte   0xe2,0x4c,0x60,0xf3     @ sha256su1 q10,q8,q9
2623         vld1.32         {q12},[r3]!
2624         vadd.i32        q13,q13,q11
2625         .byte   0xe0,0x63,0xfa,0xf3     @ sha256su0 q11,q8
2626         vmov            q2,q0
2627         .byte   0x6a,0x0c,0x02,0xf3     @ sha256h q0,q1,q13
2628         .byte   0x6a,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q13
2629         .byte   0xe4,0x6c,0x62,0xf3     @ sha256su1 q11,q9,q10
2630         vld1.32         {q13},[r3]!
2631         vadd.i32        q12,q12,q8
2632         .byte   0xe2,0x03,0xfa,0xf3     @ sha256su0 q8,q9
2633         vmov            q2,q0
2634         .byte   0x68,0x0c,0x02,0xf3     @ sha256h q0,q1,q12
2635         .byte   0x68,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q12
2636         .byte   0xe6,0x0c,0x64,0xf3     @ sha256su1 q8,q10,q11
2637         vld1.32         {q12},[r3]!
2638         vadd.i32        q13,q13,q9
2639         .byte   0xe4,0x23,0xfa,0xf3     @ sha256su0 q9,q10
2640         vmov            q2,q0
2641         .byte   0x6a,0x0c,0x02,0xf3     @ sha256h q0,q1,q13
2642         .byte   0x6a,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q13
2643         .byte   0xe0,0x2c,0x66,0xf3     @ sha256su1 q9,q11,q8
2644         vld1.32         {q13},[r3]!
2645         vadd.i32        q12,q12,q10
2646         .byte   0xe6,0x43,0xfa,0xf3     @ sha256su0 q10,q11
2647         vmov            q2,q0
2648         .byte   0x68,0x0c,0x02,0xf3     @ sha256h q0,q1,q12
2649         .byte   0x68,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q12
2650         .byte   0xe2,0x4c,0x60,0xf3     @ sha256su1 q10,q8,q9
2651         vld1.32         {q12},[r3]!
2652         vadd.i32        q13,q13,q11
2653         .byte   0xe0,0x63,0xfa,0xf3     @ sha256su0 q11,q8
2654         vmov            q2,q0
2655         .byte   0x6a,0x0c,0x02,0xf3     @ sha256h q0,q1,q13
2656         .byte   0x6a,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q13
2657         .byte   0xe4,0x6c,0x62,0xf3     @ sha256su1 q11,q9,q10
@ Last four quads: no further schedule extension needed, only sha256h/h2.
2658         vld1.32 {q13},[r3]!
2659         vadd.i32        q12,q12,q8
2660         vmov            q2,q0
2661         .byte   0x68,0x0c,0x02,0xf3     @ sha256h q0,q1,q12
2662         .byte   0x68,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q12
2664         vld1.32 {q12},[r3]!
2665         vadd.i32        q13,q13,q9
2666         vmov            q2,q0
2667         .byte   0x6a,0x0c,0x02,0xf3     @ sha256h q0,q1,q13
2668         .byte   0x6a,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q13
@ Final K load has no writeback; K256 is 256 bytes, so rewinding the table
@ pointer for the next block takes 256-16 rather than 256.
2670         vld1.32 {q13},[r3]
2671         vadd.i32        q12,q12,q10
2672         sub             r3,r3,#256-16   @ rewind
2673         vmov            q2,q0
2674         .byte   0x68,0x0c,0x02,0xf3     @ sha256h q0,q1,q12
2675         .byte   0x68,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q12
2677         vadd.i32        q13,q13,q11
2678         vmov            q2,q0
2679         .byte   0x6a,0x0c,0x02,0xf3     @ sha256h q0,q1,q13
2680         .byte   0x6a,0x2c,0x14,0xf3     @ sha256h2 q1,q2,q13
@ Add back the state saved in q14/q15 at the top of the loop.
2682         vadd.i32        q0,q0,q14
2683         vadd.i32        q1,q1,q15
@ Z flag was set by "teq r1,r2" above: loop until the input is consumed.
2684         bne             .Loop_v8
@ Store the updated hash state and return.
2686         vst1.32         {q0,q1},[r0]
2688         bx      lr              @ bx lr
2689 .size   sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
2690 #endif
@ Identification string embedded verbatim in the binary (runtime data —
@ must not be altered).
2691 .asciz  "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
2692 .align  2
2693 #if __ARM_MAX_ARCH__>=7
@ 4-byte, 4-byte-aligned common symbol holding the runtime CPU-capability
@ bits.  sha256_block_data_order reaches it PC-relatively through the
@ .LOPENSSL_armcap offset word and tests ARMV8_SHA256 / ARMV7_NEON to pick
@ a code path.  .hidden keeps the symbol out of the dynamic symbol table.
2694 .comm   OPENSSL_armcap_P,4,4
2695 .hidden OPENSSL_armcap_P
2696 #endif