@ minix.git: crypto/external/bsd/openssl/lib/libcrypto/arch/arm/sha512-armv4.S
1 #include "arm_arch.h"
2 #include "arm_asm.h"
3 #ifdef __ARMEL__
4 # define LO 0
5 # define HI 4
6 # define WORD64(hi0,lo0,hi1,lo1)        .word   lo0,hi0, lo1,hi1
7 #else
8 # define HI 0
9 # define LO 4
10 # define WORD64(hi0,lo0,hi1,lo1)        .word   hi0,lo0, hi1,lo1
11 #endif
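@ LO and HI are the byte offsets of the low and high 32-bit halves of a
@ 64-bit word in memory, and WORD64 stores the K512 constants in the
@ matching order, so the integer-only code below can handle 64-bit
@ values as pairs of 32-bit registers on either endianness.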
13 .text
14 .code   32
15 .type   K512,%object
16 .align  5
17 K512:
18 WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
19 WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
20 WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
21 WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
22 WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
23 WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
24 WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
25 WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
26 WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
27 WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
28 WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
29 WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
30 WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
31 WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
32 WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
33 WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
34 WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
35 WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
36 WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
37 WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
38 WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
39 WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
40 WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
41 WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
42 WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
43 WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
44 WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
45 WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
46 WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
47 WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
48 WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
49 WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
50 WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
51 WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
52 WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
53 WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
54 WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
55 WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
56 WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
57 WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
58 .size   K512,.-K512
59 #if __ARM_MAX_ARCH__>=7
60 .LOPENSSL_armcap:
61 .word   OPENSSL_armcap_P-sha512_block_data_order
62 .skip   32-4
63 #else
64 .skip   32
65 #endif
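@ .LOPENSSL_armcap holds the distance from sha512_block_data_order to
@ OPENSSL_armcap_P; the entry code adds it to the function address in
@ r3 to load the capability word and branches to .LNEON when the NEON
@ bit (bit 0) is set.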
67 .global sha512_block_data_order
68 .type   sha512_block_data_order,%function
69 sha512_block_data_order:
70         sub     r3,pc,#8                @ sha512_block_data_order
71         add     r2,r1,r2,lsl#7  @ len to point at the end of inp
72 #if __ARM_MAX_ARCH__>=7
73         ldr     r12,.LOPENSSL_armcap
74         ldr     r12,[r3,r12]            @ OPENSSL_armcap_P
75         tst     r12,#1
76         bne     .LNEON
77 #endif
78         stmdb   sp!,{r4-r12,lr}
79         sub     r14,r3,#672             @ K512
80         sub     sp,sp,#9*8
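        @ Integer-path layout: r0=ctx, r1=inp, r2=end of inp, r14=K512,
        @ r5:r6=a, r7:r8=e, r3:r4=T; the other working variables live in
        @ the 72-byte frame at sp#0..#63 and the current message word
        @ X[i] is kept at sp#64.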
82         ldr     r7,[r0,#32+LO]
83         ldr     r8,[r0,#32+HI]
84         ldr     r9, [r0,#48+LO]
85         ldr     r10, [r0,#48+HI]
86         ldr     r11, [r0,#56+LO]
87         ldr     r12, [r0,#56+HI]
88 .Loop:
89         str     r9, [sp,#48+0]
90         str     r10, [sp,#48+4]
91         str     r11, [sp,#56+0]
92         str     r12, [sp,#56+4]
93         ldr     r5,[r0,#0+LO]
94         ldr     r6,[r0,#0+HI]
95         ldr     r3,[r0,#8+LO]
96         ldr     r4,[r0,#8+HI]
97         ldr     r9, [r0,#16+LO]
98         ldr     r10, [r0,#16+HI]
99         ldr     r11, [r0,#24+LO]
100         ldr     r12, [r0,#24+HI]
101         str     r3,[sp,#8+0]
102         str     r4,[sp,#8+4]
103         str     r9, [sp,#16+0]
104         str     r10, [sp,#16+4]
105         str     r11, [sp,#24+0]
106         str     r12, [sp,#24+4]
107         ldr     r3,[r0,#40+LO]
108         ldr     r4,[r0,#40+HI]
109         str     r3,[sp,#40+0]
110         str     r4,[sp,#40+4]
112 .L00_15:
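        @ Rounds 0..15: fetch the next big-endian 64-bit message word.
        @ Pre-ARMv7 cores assemble it byte by byte (no unaligned ldr);
        @ ARMv7 uses two word loads plus rev on little-endian.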
113 #if __ARM_ARCH__<7
114         ldrb    r3,[r1,#7]
115         ldrb    r9, [r1,#6]
116         ldrb    r10, [r1,#5]
117         ldrb    r11, [r1,#4]
118         ldrb    r4,[r1,#3]
119         ldrb    r12, [r1,#2]
120         orr     r3,r3,r9,lsl#8
121         ldrb    r9, [r1,#1]
122         orr     r3,r3,r10,lsl#16
123         ldrb    r10, [r1],#8
124         orr     r3,r3,r11,lsl#24
125         orr     r4,r4,r12,lsl#8
126         orr     r4,r4,r9,lsl#16
127         orr     r4,r4,r10,lsl#24
128 #else
129         ldr     r3,[r1,#4]
130         ldr     r4,[r1],#8
131 #ifdef __ARMEL__
132         rev     r3,r3
133         rev     r4,r4
134 #endif
135 #endif
136         @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
137         @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
138         @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
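        @ e.g. ROTR(x,14): result bits 0..31 are x bits 14..45, i.e.
        @ (lo>>14)|(hi<<18); bits 32..63 are x bits 46..63 and 0..13,
        @ i.e. (hi>>14)|(lo<<18).  ROTR(x,41) rotates past the word
        @ boundary, so its terms become hi>>9 and lo<<23 (41-32=9,
        @ 64-41=23): every 64-bit rotation costs only 32-bit shifts.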
139         mov     r9,r7,lsr#14
140         str     r3,[sp,#64+0]
141         mov     r10,r8,lsr#14
142         str     r4,[sp,#64+4]
143         eor     r9,r9,r8,lsl#18
144         ldr     r11,[sp,#56+0]  @ h.lo
145         eor     r10,r10,r7,lsl#18
146         ldr     r12,[sp,#56+4]  @ h.hi
147         eor     r9,r9,r7,lsr#18
148         eor     r10,r10,r8,lsr#18
149         eor     r9,r9,r8,lsl#14
150         eor     r10,r10,r7,lsl#14
151         eor     r9,r9,r8,lsr#9
152         eor     r10,r10,r7,lsr#9
153         eor     r9,r9,r7,lsl#23
154         eor     r10,r10,r8,lsl#23       @ Sigma1(e)
155         adds    r3,r3,r9
156         ldr     r9,[sp,#40+0]   @ f.lo
157         adc     r4,r4,r10               @ T += Sigma1(e)
158         ldr     r10,[sp,#40+4]  @ f.hi
159         adds    r3,r3,r11
160         ldr     r11,[sp,#48+0]  @ g.lo
161         adc     r4,r4,r12               @ T += h
162         ldr     r12,[sp,#48+4]  @ g.hi
164         eor     r9,r9,r11
165         str     r7,[sp,#32+0]
166         eor     r10,r10,r12
167         str     r8,[sp,#32+4]
168         and     r9,r9,r7
169         str     r5,[sp,#0+0]
170         and     r10,r10,r8
171         str     r6,[sp,#0+4]
172         eor     r9,r9,r11
173         ldr     r11,[r14,#LO]   @ K[i].lo
174         eor     r10,r10,r12             @ Ch(e,f,g)
175         ldr     r12,[r14,#HI]   @ K[i].hi
177         adds    r3,r3,r9
178         ldr     r7,[sp,#24+0]   @ d.lo
179         adc     r4,r4,r10               @ T += Ch(e,f,g)
180         ldr     r8,[sp,#24+4]   @ d.hi
181         adds    r3,r3,r11
182         and     r9,r11,#0xff
183         adc     r4,r4,r12               @ T += K[i]
184         adds    r7,r7,r3
185         ldr     r11,[sp,#8+0]   @ b.lo
186         adc     r8,r8,r4                @ d += T
187         teq     r9,#148
189         ldr     r12,[sp,#16+0]  @ c.lo
190         orreq   r14,r14,#1
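        @ K512 entries are 8-byte aligned, so bit 0 of the K pointer in
        @ r14 is free and serves as a loop flag: 148 (0x94) is the low
        @ byte of K[15], marking that the last of the first 16 round
        @ constants has been consumed.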
191         @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
192         @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
193         @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
194         mov     r9,r5,lsr#28
195         mov     r10,r6,lsr#28
196         eor     r9,r9,r6,lsl#4
197         eor     r10,r10,r5,lsl#4
198         eor     r9,r9,r6,lsr#2
199         eor     r10,r10,r5,lsr#2
200         eor     r9,r9,r5,lsl#30
201         eor     r10,r10,r6,lsl#30
202         eor     r9,r9,r6,lsr#7
203         eor     r10,r10,r5,lsr#7
204         eor     r9,r9,r5,lsl#25
205         eor     r10,r10,r6,lsl#25       @ Sigma0(a)
206         adds    r3,r3,r9
207         and     r9,r5,r11
208         adc     r4,r4,r10               @ T += Sigma0(a)
210         ldr     r10,[sp,#8+4]   @ b.hi
211         orr     r5,r5,r11
212         ldr     r11,[sp,#16+4]  @ c.hi
213         and     r5,r5,r12
214         and     r12,r6,r10
215         orr     r6,r6,r10
216         orr     r5,r5,r9                @ Maj(a,b,c).lo
217         and     r6,r6,r11
218         adds    r5,r5,r3
219         orr     r6,r6,r12               @ Maj(a,b,c).hi
220         sub     sp,sp,#8
221         adc     r6,r6,r4                @ h += T
222         tst     r14,#1
223         add     r14,r14,#8
224         tst     r14,#1
225         beq     .L00_15
226         ldr     r9,[sp,#184+0]
227         ldr     r10,[sp,#184+4]
228         bic     r14,r14,#1
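        @ Rounds 16..79: the frame slides down 8 bytes per round, so the
        @ 16-word message window sits at fixed offsets: sp#184 = X[i+1]
        @ (input to sigma0), sp#80 = X[i+14] (sigma1), sp#120 = X[i+9],
        @ sp#192 = X[i]; together they form the schedule update
        @ X[i] += sigma0(X[i+1]) + sigma1(X[i+14]) + X[i+9].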
229 .L16_79:
230         @ sigma0(x)     (ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
231         @ LO            lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
232         @ HI            hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
233         mov     r3,r9,lsr#1
234         ldr     r11,[sp,#80+0]
235         mov     r4,r10,lsr#1
236         ldr     r12,[sp,#80+4]
237         eor     r3,r3,r10,lsl#31
238         eor     r4,r4,r9,lsl#31
239         eor     r3,r3,r9,lsr#8
240         eor     r4,r4,r10,lsr#8
241         eor     r3,r3,r10,lsl#24
242         eor     r4,r4,r9,lsl#24
243         eor     r3,r3,r9,lsr#7
244         eor     r4,r4,r10,lsr#7
245         eor     r3,r3,r10,lsl#25
247         @ sigma1(x)     (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
248         @ LO            lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
249         @ HI            hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
250         mov     r9,r11,lsr#19
251         mov     r10,r12,lsr#19
252         eor     r9,r9,r12,lsl#13
253         eor     r10,r10,r11,lsl#13
254         eor     r9,r9,r12,lsr#29
255         eor     r10,r10,r11,lsr#29
256         eor     r9,r9,r11,lsl#3
257         eor     r10,r10,r12,lsl#3
258         eor     r9,r9,r11,lsr#6
259         eor     r10,r10,r12,lsr#6
260         ldr     r11,[sp,#120+0]
261         eor     r9,r9,r12,lsl#26
263         ldr     r12,[sp,#120+4]
264         adds    r3,r3,r9
265         ldr     r9,[sp,#192+0]
266         adc     r4,r4,r10
268         ldr     r10,[sp,#192+4]
269         adds    r3,r3,r11
270         adc     r4,r4,r12
271         adds    r3,r3,r9
272         adc     r4,r4,r10
273         @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
274         @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
275         @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
276         mov     r9,r7,lsr#14
277         str     r3,[sp,#64+0]
278         mov     r10,r8,lsr#14
279         str     r4,[sp,#64+4]
280         eor     r9,r9,r8,lsl#18
281         ldr     r11,[sp,#56+0]  @ h.lo
282         eor     r10,r10,r7,lsl#18
283         ldr     r12,[sp,#56+4]  @ h.hi
284         eor     r9,r9,r7,lsr#18
285         eor     r10,r10,r8,lsr#18
286         eor     r9,r9,r8,lsl#14
287         eor     r10,r10,r7,lsl#14
288         eor     r9,r9,r8,lsr#9
289         eor     r10,r10,r7,lsr#9
290         eor     r9,r9,r7,lsl#23
291         eor     r10,r10,r8,lsl#23       @ Sigma1(e)
292         adds    r3,r3,r9
293         ldr     r9,[sp,#40+0]   @ f.lo
294         adc     r4,r4,r10               @ T += Sigma1(e)
295         ldr     r10,[sp,#40+4]  @ f.hi
296         adds    r3,r3,r11
297         ldr     r11,[sp,#48+0]  @ g.lo
298         adc     r4,r4,r12               @ T += h
299         ldr     r12,[sp,#48+4]  @ g.hi
301         eor     r9,r9,r11
302         str     r7,[sp,#32+0]
303         eor     r10,r10,r12
304         str     r8,[sp,#32+4]
305         and     r9,r9,r7
306         str     r5,[sp,#0+0]
307         and     r10,r10,r8
308         str     r6,[sp,#0+4]
309         eor     r9,r9,r11
310         ldr     r11,[r14,#LO]   @ K[i].lo
311         eor     r10,r10,r12             @ Ch(e,f,g)
312         ldr     r12,[r14,#HI]   @ K[i].hi
314         adds    r3,r3,r9
315         ldr     r7,[sp,#24+0]   @ d.lo
316         adc     r4,r4,r10               @ T += Ch(e,f,g)
317         ldr     r8,[sp,#24+4]   @ d.hi
318         adds    r3,r3,r11
319         and     r9,r11,#0xff
320         adc     r4,r4,r12               @ T += K[i]
321         adds    r7,r7,r3
322         ldr     r11,[sp,#8+0]   @ b.lo
323         adc     r8,r8,r4                @ d += T
324         teq     r9,#23
326         ldr     r12,[sp,#16+0]  @ c.lo
327         orreq   r14,r14,#1
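        @ 23 (0x17) is the low byte of K[79], the final round constant,
        @ so the flag in bit 0 of r14 now signals loop termination.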
328         @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
329         @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
330         @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
331         mov     r9,r5,lsr#28
332         mov     r10,r6,lsr#28
333         eor     r9,r9,r6,lsl#4
334         eor     r10,r10,r5,lsl#4
335         eor     r9,r9,r6,lsr#2
336         eor     r10,r10,r5,lsr#2
337         eor     r9,r9,r5,lsl#30
338         eor     r10,r10,r6,lsl#30
339         eor     r9,r9,r6,lsr#7
340         eor     r10,r10,r5,lsr#7
341         eor     r9,r9,r5,lsl#25
342         eor     r10,r10,r6,lsl#25       @ Sigma0(a)
343         adds    r3,r3,r9
344         and     r9,r5,r11
345         adc     r4,r4,r10               @ T += Sigma0(a)
347         ldr     r10,[sp,#8+4]   @ b.hi
348         orr     r5,r5,r11
349         ldr     r11,[sp,#16+4]  @ c.hi
350         and     r5,r5,r12
351         and     r12,r6,r10
352         orr     r6,r6,r10
353         orr     r5,r5,r9                @ Maj(a,b,c).lo
354         and     r6,r6,r11
355         adds    r5,r5,r3
356         orr     r6,r6,r12               @ Maj(a,b,c).hi
357         sub     sp,sp,#8
358         adc     r6,r6,r4                @ h += T
359         tst     r14,#1
360         add     r14,r14,#8
361         ldreq   r9,[sp,#184+0]
362         ldreq   r10,[sp,#184+4]
363         beq     .L16_79
364         bic     r14,r14,#1
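        @ All 80 rounds done: fold the working variables back into the
        @ hash state, h[0..7] += a..h, using adds/adc pairs on the
        @ 32-bit halves.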
366         ldr     r3,[sp,#8+0]
367         ldr     r4,[sp,#8+4]
368         ldr     r9, [r0,#0+LO]
369         ldr     r10, [r0,#0+HI]
370         ldr     r11, [r0,#8+LO]
371         ldr     r12, [r0,#8+HI]
372         adds    r9,r5,r9
373         str     r9, [r0,#0+LO]
374         adc     r10,r6,r10
375         str     r10, [r0,#0+HI]
376         adds    r11,r3,r11
377         str     r11, [r0,#8+LO]
378         adc     r12,r4,r12
379         str     r12, [r0,#8+HI]
381         ldr     r5,[sp,#16+0]
382         ldr     r6,[sp,#16+4]
383         ldr     r3,[sp,#24+0]
384         ldr     r4,[sp,#24+4]
385         ldr     r9, [r0,#16+LO]
386         ldr     r10, [r0,#16+HI]
387         ldr     r11, [r0,#24+LO]
388         ldr     r12, [r0,#24+HI]
389         adds    r9,r5,r9
390         str     r9, [r0,#16+LO]
391         adc     r10,r6,r10
392         str     r10, [r0,#16+HI]
393         adds    r11,r3,r11
394         str     r11, [r0,#24+LO]
395         adc     r12,r4,r12
396         str     r12, [r0,#24+HI]
398         ldr     r3,[sp,#40+0]
399         ldr     r4,[sp,#40+4]
400         ldr     r9, [r0,#32+LO]
401         ldr     r10, [r0,#32+HI]
402         ldr     r11, [r0,#40+LO]
403         ldr     r12, [r0,#40+HI]
404         adds    r7,r7,r9
405         str     r7,[r0,#32+LO]
406         adc     r8,r8,r10
407         str     r8,[r0,#32+HI]
408         adds    r11,r3,r11
409         str     r11, [r0,#40+LO]
410         adc     r12,r4,r12
411         str     r12, [r0,#40+HI]
413         ldr     r5,[sp,#48+0]
414         ldr     r6,[sp,#48+4]
415         ldr     r3,[sp,#56+0]
416         ldr     r4,[sp,#56+4]
417         ldr     r9, [r0,#48+LO]
418         ldr     r10, [r0,#48+HI]
419         ldr     r11, [r0,#56+LO]
420         ldr     r12, [r0,#56+HI]
421         adds    r9,r5,r9
422         str     r9, [r0,#48+LO]
423         adc     r10,r6,r10
424         str     r10, [r0,#48+HI]
425         adds    r11,r3,r11
426         str     r11, [r0,#56+LO]
427         adc     r12,r4,r12
428         str     r12, [r0,#56+HI]
430         add     sp,sp,#640
431         sub     r14,r14,#640
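        @ The rounds moved sp down and the K512 pointer up by 80*8 = 640
        @ bytes each; rewind both, then loop while input remains
        @ (r1 reaches r2 at the end of the data).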
433         teq     r1,r2
434         bne     .Loop
436         add     sp,sp,#8*9              @ destroy frame
437 #if __ARM_ARCH__>=5
438         ldmia   sp!,{r4-r12,pc}
439 #else
440         ldmia   sp!,{r4-r12,lr}
441         tst     lr,#1
442         moveq   pc,lr                   @ be binary compatible with V4, yet
443         .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
444 #endif
445 #if __ARM_MAX_ARCH__>=7
446 .arch   armv7-a
447 .fpu    neon
449 .align  4
450 .LNEON:
451         dmb                             @ errata #451034 on early Cortex A8
452         vstmdb  sp!,{d8-d15}            @ ABI specification says so
453         sub     r3,r3,#672              @ K512
454         vldmia  r0,{d16-d23}            @ load context
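        @ NEON register allocation: d16-d23 hold the state a..h, the 16
        @ message words live in q0-q7 (d0-d15), d28 carries K[i], and
        @ d24-d27/d29 are scratch for the Sigma and Ch terms.  Maj(a,b,c)
        @ is left in d30 and only added into the new 'h' at the start of
        @ the following round ("h+=Maj from the past"), shortening the
        @ critical path.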
455 .Loop_neon:
456         vshr.u64        d24,d20,#14     @ 0
457 #if 0<16
458         vld1.64         {d0},[r1]!      @ handles unaligned
459 #endif
460         vshr.u64        d25,d20,#18
461 #if 0>0
462          vadd.i64       d16,d30                 @ h+=Maj from the past
463 #endif
464         vshr.u64        d26,d20,#41
465         vld1.64         {d28},[r3,:64]! @ K[i++]
466         vsli.64         d24,d20,#50
467         vsli.64         d25,d20,#46
468         vmov            d29,d20
469         vsli.64         d26,d20,#23
470 #if 0<16 && defined(__ARMEL__)
471         vrev64.8        d0,d0
472 #endif
473         veor            d25,d24
474         vbsl            d29,d21,d22             @ Ch(e,f,g)
475         vshr.u64        d24,d16,#28
476         veor            d26,d25                 @ Sigma1(e)
477         vadd.i64        d27,d29,d23
478         vshr.u64        d25,d16,#34
479         vsli.64         d24,d16,#36
480         vadd.i64        d27,d26
481         vshr.u64        d26,d16,#39
482         vadd.i64        d28,d0
483         vsli.64         d25,d16,#30
484         veor            d30,d16,d17
485         vsli.64         d26,d16,#25
486         veor            d23,d24,d25
487         vadd.i64        d27,d28
488         vbsl            d30,d18,d17             @ Maj(a,b,c)
489         veor            d23,d26                 @ Sigma0(a)
490         vadd.i64        d19,d27
491         vadd.i64        d30,d27
492         @ vadd.i64      d23,d30
493         vshr.u64        d24,d19,#14     @ 1
494 #if 1<16
495         vld1.64         {d1},[r1]!      @ handles unaligned
496 #endif
497         vshr.u64        d25,d19,#18
498 #if 1>0
499          vadd.i64       d23,d30                 @ h+=Maj from the past
500 #endif
501         vshr.u64        d26,d19,#41
502         vld1.64         {d28},[r3,:64]! @ K[i++]
503         vsli.64         d24,d19,#50
504         vsli.64         d25,d19,#46
505         vmov            d29,d19
506         vsli.64         d26,d19,#23
507 #if 1<16 && defined(__ARMEL__)
508         vrev64.8        d1,d1
509 #endif
510         veor            d25,d24
511         vbsl            d29,d20,d21             @ Ch(e,f,g)
512         vshr.u64        d24,d23,#28
513         veor            d26,d25                 @ Sigma1(e)
514         vadd.i64        d27,d29,d22
515         vshr.u64        d25,d23,#34
516         vsli.64         d24,d23,#36
517         vadd.i64        d27,d26
518         vshr.u64        d26,d23,#39
519         vadd.i64        d28,d1
520         vsli.64         d25,d23,#30
521         veor            d30,d23,d16
522         vsli.64         d26,d23,#25
523         veor            d22,d24,d25
524         vadd.i64        d27,d28
525         vbsl            d30,d17,d16             @ Maj(a,b,c)
526         veor            d22,d26                 @ Sigma0(a)
527         vadd.i64        d18,d27
528         vadd.i64        d30,d27
529         @ vadd.i64      d22,d30
530         vshr.u64        d24,d18,#14     @ 2
531 #if 2<16
532         vld1.64         {d2},[r1]!      @ handles unaligned
533 #endif
534         vshr.u64        d25,d18,#18
535 #if 2>0
536          vadd.i64       d22,d30                 @ h+=Maj from the past
537 #endif
538         vshr.u64        d26,d18,#41
539         vld1.64         {d28},[r3,:64]! @ K[i++]
540         vsli.64         d24,d18,#50
541         vsli.64         d25,d18,#46
542         vmov            d29,d18
543         vsli.64         d26,d18,#23
544 #if 2<16 && defined(__ARMEL__)
545         vrev64.8        d2,d2
546 #endif
547         veor            d25,d24
548         vbsl            d29,d19,d20             @ Ch(e,f,g)
549         vshr.u64        d24,d22,#28
550         veor            d26,d25                 @ Sigma1(e)
551         vadd.i64        d27,d29,d21
552         vshr.u64        d25,d22,#34
553         vsli.64         d24,d22,#36
554         vadd.i64        d27,d26
555         vshr.u64        d26,d22,#39
556         vadd.i64        d28,d2
557         vsli.64         d25,d22,#30
558         veor            d30,d22,d23
559         vsli.64         d26,d22,#25
560         veor            d21,d24,d25
561         vadd.i64        d27,d28
562         vbsl            d30,d16,d23             @ Maj(a,b,c)
563         veor            d21,d26                 @ Sigma0(a)
564         vadd.i64        d17,d27
565         vadd.i64        d30,d27
566         @ vadd.i64      d21,d30
567         vshr.u64        d24,d17,#14     @ 3
568 #if 3<16
569         vld1.64         {d3},[r1]!      @ handles unaligned
570 #endif
571         vshr.u64        d25,d17,#18
572 #if 3>0
573          vadd.i64       d21,d30                 @ h+=Maj from the past
574 #endif
575         vshr.u64        d26,d17,#41
576         vld1.64         {d28},[r3,:64]! @ K[i++]
577         vsli.64         d24,d17,#50
578         vsli.64         d25,d17,#46
579         vmov            d29,d17
580         vsli.64         d26,d17,#23
581 #if 3<16 && defined(__ARMEL__)
582         vrev64.8        d3,d3
583 #endif
584         veor            d25,d24
585         vbsl            d29,d18,d19             @ Ch(e,f,g)
586         vshr.u64        d24,d21,#28
587         veor            d26,d25                 @ Sigma1(e)
588         vadd.i64        d27,d29,d20
589         vshr.u64        d25,d21,#34
590         vsli.64         d24,d21,#36
591         vadd.i64        d27,d26
592         vshr.u64        d26,d21,#39
593         vadd.i64        d28,d3
594         vsli.64         d25,d21,#30
595         veor            d30,d21,d22
596         vsli.64         d26,d21,#25
597         veor            d20,d24,d25
598         vadd.i64        d27,d28
599         vbsl            d30,d23,d22             @ Maj(a,b,c)
600         veor            d20,d26                 @ Sigma0(a)
601         vadd.i64        d16,d27
602         vadd.i64        d30,d27
603         @ vadd.i64      d20,d30
604         vshr.u64        d24,d16,#14     @ 4
605 #if 4<16
606         vld1.64         {d4},[r1]!      @ handles unaligned
607 #endif
608         vshr.u64        d25,d16,#18
609 #if 4>0
610          vadd.i64       d20,d30                 @ h+=Maj from the past
611 #endif
612         vshr.u64        d26,d16,#41
613         vld1.64         {d28},[r3,:64]! @ K[i++]
614         vsli.64         d24,d16,#50
615         vsli.64         d25,d16,#46
616         vmov            d29,d16
617         vsli.64         d26,d16,#23
618 #if 4<16 && defined(__ARMEL__)
619         vrev64.8        d4,d4
620 #endif
621         veor            d25,d24
622         vbsl            d29,d17,d18             @ Ch(e,f,g)
623         vshr.u64        d24,d20,#28
624         veor            d26,d25                 @ Sigma1(e)
625         vadd.i64        d27,d29,d19
626         vshr.u64        d25,d20,#34
627         vsli.64         d24,d20,#36
628         vadd.i64        d27,d26
629         vshr.u64        d26,d20,#39
630         vadd.i64        d28,d4
631         vsli.64         d25,d20,#30
632         veor            d30,d20,d21
633         vsli.64         d26,d20,#25
634         veor            d19,d24,d25
635         vadd.i64        d27,d28
636         vbsl            d30,d22,d21             @ Maj(a,b,c)
637         veor            d19,d26                 @ Sigma0(a)
638         vadd.i64        d23,d27
639         vadd.i64        d30,d27
640         @ vadd.i64      d19,d30
641         vshr.u64        d24,d23,#14     @ 5
642 #if 5<16
643         vld1.64         {d5},[r1]!      @ handles unaligned
644 #endif
645         vshr.u64        d25,d23,#18
646 #if 5>0
647          vadd.i64       d19,d30                 @ h+=Maj from the past
648 #endif
649         vshr.u64        d26,d23,#41
650         vld1.64         {d28},[r3,:64]! @ K[i++]
651         vsli.64         d24,d23,#50
652         vsli.64         d25,d23,#46
653         vmov            d29,d23
654         vsli.64         d26,d23,#23
655 #if 5<16 && defined(__ARMEL__)
656         vrev64.8        d5,d5
657 #endif
658         veor            d25,d24
659         vbsl            d29,d16,d17             @ Ch(e,f,g)
660         vshr.u64        d24,d19,#28
661         veor            d26,d25                 @ Sigma1(e)
662         vadd.i64        d27,d29,d18
663         vshr.u64        d25,d19,#34
664         vsli.64         d24,d19,#36
665         vadd.i64        d27,d26
666         vshr.u64        d26,d19,#39
667         vadd.i64        d28,d5
668         vsli.64         d25,d19,#30
669         veor            d30,d19,d20
670         vsli.64         d26,d19,#25
671         veor            d18,d24,d25
672         vadd.i64        d27,d28
673         vbsl            d30,d21,d20             @ Maj(a,b,c)
674         veor            d18,d26                 @ Sigma0(a)
675         vadd.i64        d22,d27
676         vadd.i64        d30,d27
677         @ vadd.i64      d18,d30
678         vshr.u64        d24,d22,#14     @ 6
679 #if 6<16
680         vld1.64         {d6},[r1]!      @ handles unaligned
681 #endif
682         vshr.u64        d25,d22,#18
683 #if 6>0
684          vadd.i64       d18,d30                 @ h+=Maj from the past
685 #endif
686         vshr.u64        d26,d22,#41
687         vld1.64         {d28},[r3,:64]! @ K[i++]
688         vsli.64         d24,d22,#50
689         vsli.64         d25,d22,#46
690         vmov            d29,d22
691         vsli.64         d26,d22,#23
692 #if 6<16 && defined(__ARMEL__)
693         vrev64.8        d6,d6
694 #endif
695         veor            d25,d24
696         vbsl            d29,d23,d16             @ Ch(e,f,g)
697         vshr.u64        d24,d18,#28
698         veor            d26,d25                 @ Sigma1(e)
699         vadd.i64        d27,d29,d17
700         vshr.u64        d25,d18,#34
701         vsli.64         d24,d18,#36
702         vadd.i64        d27,d26
703         vshr.u64        d26,d18,#39
704         vadd.i64        d28,d6
705         vsli.64         d25,d18,#30
706         veor            d30,d18,d19
707         vsli.64         d26,d18,#25
708         veor            d17,d24,d25
709         vadd.i64        d27,d28
710         vbsl            d30,d20,d19             @ Maj(a,b,c)
711         veor            d17,d26                 @ Sigma0(a)
712         vadd.i64        d21,d27
713         vadd.i64        d30,d27
714         @ vadd.i64      d17,d30
715         vshr.u64        d24,d21,#14     @ 7
716 #if 7<16
717         vld1.64         {d7},[r1]!      @ handles unaligned
718 #endif
719         vshr.u64        d25,d21,#18
720 #if 7>0
721          vadd.i64       d17,d30                 @ h+=Maj from the past
722 #endif
723         vshr.u64        d26,d21,#41
724         vld1.64         {d28},[r3,:64]! @ K[i++]
725         vsli.64         d24,d21,#50
726         vsli.64         d25,d21,#46
727         vmov            d29,d21
728         vsli.64         d26,d21,#23
729 #if 7<16 && defined(__ARMEL__)
730         vrev64.8        d7,d7
731 #endif
732         veor            d25,d24
733         vbsl            d29,d22,d23             @ Ch(e,f,g)
734         vshr.u64        d24,d17,#28
735         veor            d26,d25                 @ Sigma1(e)
736         vadd.i64        d27,d29,d16
737         vshr.u64        d25,d17,#34
738         vsli.64         d24,d17,#36
739         vadd.i64        d27,d26
740         vshr.u64        d26,d17,#39
741         vadd.i64        d28,d7
742         vsli.64         d25,d17,#30
743         veor            d30,d17,d18
744         vsli.64         d26,d17,#25
745         veor            d16,d24,d25
746         vadd.i64        d27,d28
747         vbsl            d30,d19,d18             @ Maj(a,b,c)
748         veor            d16,d26                 @ Sigma0(a)
749         vadd.i64        d20,d27
750         vadd.i64        d30,d27
751         @ vadd.i64      d16,d30
752         vshr.u64        d24,d20,#14     @ 8
753 #if 8<16
754         vld1.64         {d8},[r1]!      @ handles unaligned
755 #endif
756         vshr.u64        d25,d20,#18
757 #if 8>0
758          vadd.i64       d16,d30                 @ h+=Maj from the past
759 #endif
760         vshr.u64        d26,d20,#41
761         vld1.64         {d28},[r3,:64]! @ K[i++]
762         vsli.64         d24,d20,#50
763         vsli.64         d25,d20,#46
764         vmov            d29,d20
765         vsli.64         d26,d20,#23
766 #if 8<16 && defined(__ARMEL__)
767         vrev64.8        d8,d8
768 #endif
769         veor            d25,d24
770         vbsl            d29,d21,d22             @ Ch(e,f,g)
771         vshr.u64        d24,d16,#28
772         veor            d26,d25                 @ Sigma1(e)
773         vadd.i64        d27,d29,d23
774         vshr.u64        d25,d16,#34
775         vsli.64         d24,d16,#36
776         vadd.i64        d27,d26
777         vshr.u64        d26,d16,#39
778         vadd.i64        d28,d8
779         vsli.64         d25,d16,#30
780         veor            d30,d16,d17
781         vsli.64         d26,d16,#25
782         veor            d23,d24,d25
783         vadd.i64        d27,d28
784         vbsl            d30,d18,d17             @ Maj(a,b,c)
785         veor            d23,d26                 @ Sigma0(a)
786         vadd.i64        d19,d27
787         vadd.i64        d30,d27
788         @ vadd.i64      d23,d30
789         vshr.u64        d24,d19,#14     @ 9
790 #if 9<16
791         vld1.64         {d9},[r1]!      @ handles unaligned
792 #endif
793         vshr.u64        d25,d19,#18
794 #if 9>0
795          vadd.i64       d23,d30                 @ h+=Maj from the past
796 #endif
797         vshr.u64        d26,d19,#41
798         vld1.64         {d28},[r3,:64]! @ K[i++]
799         vsli.64         d24,d19,#50
800         vsli.64         d25,d19,#46
801         vmov            d29,d19
802         vsli.64         d26,d19,#23
803 #if 9<16 && defined(__ARMEL__)
804         vrev64.8        d9,d9
805 #endif
806         veor            d25,d24
807         vbsl            d29,d20,d21             @ Ch(e,f,g)
808         vshr.u64        d24,d23,#28
809         veor            d26,d25                 @ Sigma1(e)
810         vadd.i64        d27,d29,d22
811         vshr.u64        d25,d23,#34
812         vsli.64         d24,d23,#36
813         vadd.i64        d27,d26
814         vshr.u64        d26,d23,#39
815         vadd.i64        d28,d9
816         vsli.64         d25,d23,#30
817         veor            d30,d23,d16
818         vsli.64         d26,d23,#25
819         veor            d22,d24,d25
820         vadd.i64        d27,d28
821         vbsl            d30,d17,d16             @ Maj(a,b,c)
822         veor            d22,d26                 @ Sigma0(a)
823         vadd.i64        d18,d27
824         vadd.i64        d30,d27
825         @ vadd.i64      d22,d30
826         vshr.u64        d24,d18,#14     @ 10
827 #if 10<16
828         vld1.64         {d10},[r1]!     @ handles unaligned
829 #endif
830         vshr.u64        d25,d18,#18
831 #if 10>0
832          vadd.i64       d22,d30                 @ h+=Maj from the past
833 #endif
834         vshr.u64        d26,d18,#41
835         vld1.64         {d28},[r3,:64]! @ K[i++]
836         vsli.64         d24,d18,#50
837         vsli.64         d25,d18,#46
838         vmov            d29,d18
839         vsli.64         d26,d18,#23
840 #if 10<16 && defined(__ARMEL__)
841         vrev64.8        d10,d10
842 #endif
843         veor            d25,d24
844         vbsl            d29,d19,d20             @ Ch(e,f,g)
845         vshr.u64        d24,d22,#28
846         veor            d26,d25                 @ Sigma1(e)
847         vadd.i64        d27,d29,d21
848         vshr.u64        d25,d22,#34
849         vsli.64         d24,d22,#36
850         vadd.i64        d27,d26
851         vshr.u64        d26,d22,#39
852         vadd.i64        d28,d10
853         vsli.64         d25,d22,#30
854         veor            d30,d22,d23
855         vsli.64         d26,d22,#25
856         veor            d21,d24,d25
857         vadd.i64        d27,d28
858         vbsl            d30,d16,d23             @ Maj(a,b,c)
859         veor            d21,d26                 @ Sigma0(a)
860         vadd.i64        d17,d27
861         vadd.i64        d30,d27
862         @ vadd.i64      d21,d30
863         vshr.u64        d24,d17,#14     @ 11
864 #if 11<16
865         vld1.64         {d11},[r1]!     @ handles unaligned
866 #endif
867         vshr.u64        d25,d17,#18
868 #if 11>0
869          vadd.i64       d21,d30                 @ h+=Maj from the past
870 #endif
871         vshr.u64        d26,d17,#41
872         vld1.64         {d28},[r3,:64]! @ K[i++]
873         vsli.64         d24,d17,#50
874         vsli.64         d25,d17,#46
875         vmov            d29,d17
876         vsli.64         d26,d17,#23
877 #if 11<16 && defined(__ARMEL__)
878         vrev64.8        d11,d11
879 #endif
880         veor            d25,d24
881         vbsl            d29,d18,d19             @ Ch(e,f,g)
882         vshr.u64        d24,d21,#28
883         veor            d26,d25                 @ Sigma1(e)
884         vadd.i64        d27,d29,d20
885         vshr.u64        d25,d21,#34
886         vsli.64         d24,d21,#36
887         vadd.i64        d27,d26
888         vshr.u64        d26,d21,#39
889         vadd.i64        d28,d11
890         vsli.64         d25,d21,#30
891         veor            d30,d21,d22
892         vsli.64         d26,d21,#25
893         veor            d20,d24,d25
894         vadd.i64        d27,d28
895         vbsl            d30,d23,d22             @ Maj(a,b,c)
896         veor            d20,d26                 @ Sigma0(a)
897         vadd.i64        d16,d27
898         vadd.i64        d30,d27
899         @ vadd.i64      d20,d30
900         vshr.u64        d24,d16,#14     @ 12
901 #if 12<16
902         vld1.64         {d12},[r1]!     @ handles unaligned
903 #endif
904         vshr.u64        d25,d16,#18
905 #if 12>0
906          vadd.i64       d20,d30                 @ h+=Maj from the past
907 #endif
908         vshr.u64        d26,d16,#41
909         vld1.64         {d28},[r3,:64]! @ K[i++]
910         vsli.64         d24,d16,#50
911         vsli.64         d25,d16,#46
912         vmov            d29,d16
913         vsli.64         d26,d16,#23
914 #if 12<16 && defined(__ARMEL__)
915         vrev64.8        d12,d12
916 #endif
917         veor            d25,d24
918         vbsl            d29,d17,d18             @ Ch(e,f,g)
919         vshr.u64        d24,d20,#28
920         veor            d26,d25                 @ Sigma1(e)
921         vadd.i64        d27,d29,d19
922         vshr.u64        d25,d20,#34
923         vsli.64         d24,d20,#36
924         vadd.i64        d27,d26
925         vshr.u64        d26,d20,#39
926         vadd.i64        d28,d12
927         vsli.64         d25,d20,#30
928         veor            d30,d20,d21
929         vsli.64         d26,d20,#25
930         veor            d19,d24,d25
931         vadd.i64        d27,d28
932         vbsl            d30,d22,d21             @ Maj(a,b,c)
933         veor            d19,d26                 @ Sigma0(a)
934         vadd.i64        d23,d27
935         vadd.i64        d30,d27
936         @ vadd.i64      d19,d30
937         vshr.u64        d24,d23,#14     @ 13
938 #if 13<16
939         vld1.64         {d13},[r1]!     @ handles unaligned
940 #endif
941         vshr.u64        d25,d23,#18
942 #if 13>0
943          vadd.i64       d19,d30                 @ h+=Maj from the past
944 #endif
945         vshr.u64        d26,d23,#41
946         vld1.64         {d28},[r3,:64]! @ K[i++]
947         vsli.64         d24,d23,#50
948         vsli.64         d25,d23,#46
949         vmov            d29,d23
950         vsli.64         d26,d23,#23
951 #if 13<16 && defined(__ARMEL__)
952         vrev64.8        d13,d13
953 #endif
954         veor            d25,d24
955         vbsl            d29,d16,d17             @ Ch(e,f,g)
956         vshr.u64        d24,d19,#28
957         veor            d26,d25                 @ Sigma1(e)
958         vadd.i64        d27,d29,d18
959         vshr.u64        d25,d19,#34
960         vsli.64         d24,d19,#36
961         vadd.i64        d27,d26
962         vshr.u64        d26,d19,#39
963         vadd.i64        d28,d13
964         vsli.64         d25,d19,#30
965         veor            d30,d19,d20
966         vsli.64         d26,d19,#25
967         veor            d18,d24,d25
968         vadd.i64        d27,d28
969         vbsl            d30,d21,d20             @ Maj(a,b,c)
970         veor            d18,d26                 @ Sigma0(a)
971         vadd.i64        d22,d27
972         vadd.i64        d30,d27
973         @ vadd.i64      d18,d30
974         vshr.u64        d24,d22,#14     @ 14
975 #if 14<16
976         vld1.64         {d14},[r1]!     @ handles unaligned
977 #endif
978         vshr.u64        d25,d22,#18
979 #if 14>0
980          vadd.i64       d18,d30                 @ h+=Maj from the past
981 #endif
982         vshr.u64        d26,d22,#41
983         vld1.64         {d28},[r3,:64]! @ K[i++]
984         vsli.64         d24,d22,#50
985         vsli.64         d25,d22,#46
986         vmov            d29,d22
987         vsli.64         d26,d22,#23
988 #if 14<16 && defined(__ARMEL__)
989         vrev64.8        d14,d14
990 #endif
991         veor            d25,d24
992         vbsl            d29,d23,d16             @ Ch(e,f,g)
993         vshr.u64        d24,d18,#28
994         veor            d26,d25                 @ Sigma1(e)
995         vadd.i64        d27,d29,d17
996         vshr.u64        d25,d18,#34
997         vsli.64         d24,d18,#36
998         vadd.i64        d27,d26
999         vshr.u64        d26,d18,#39
1000         vadd.i64        d28,d14
1001         vsli.64         d25,d18,#30
1002         veor            d30,d18,d19
1003         vsli.64         d26,d18,#25
1004         veor            d17,d24,d25
1005         vadd.i64        d27,d28
1006         vbsl            d30,d20,d19             @ Maj(a,b,c)
1007         veor            d17,d26                 @ Sigma0(a)
1008         vadd.i64        d21,d27
1009         vadd.i64        d30,d27
1010         @ vadd.i64      d17,d30
1011         vshr.u64        d24,d21,#14     @ 15
1012 #if 15<16
1013         vld1.64         {d15},[r1]!     @ handles unaligned
1014 #endif
1015         vshr.u64        d25,d21,#18
1016 #if 15>0
1017          vadd.i64       d17,d30                 @ h+=Maj from the past
1018 #endif
1019         vshr.u64        d26,d21,#41
1020         vld1.64         {d28},[r3,:64]! @ K[i++]
1021         vsli.64         d24,d21,#50
1022         vsli.64         d25,d21,#46
1023         vmov            d29,d21
1024         vsli.64         d26,d21,#23
1025 #if 15<16 && defined(__ARMEL__)
1026         vrev64.8        d15,d15
1027 #endif
1028         veor            d25,d24
1029         vbsl            d29,d22,d23             @ Ch(e,f,g)
1030         vshr.u64        d24,d17,#28
1031         veor            d26,d25                 @ Sigma1(e)
1032         vadd.i64        d27,d29,d16
1033         vshr.u64        d25,d17,#34
1034         vsli.64         d24,d17,#36
1035         vadd.i64        d27,d26
1036         vshr.u64        d26,d17,#39
1037         vadd.i64        d28,d15
1038         vsli.64         d25,d17,#30
1039         veor            d30,d17,d18
1040         vsli.64         d26,d17,#25
1041         veor            d16,d24,d25
1042         vadd.i64        d27,d28
1043         vbsl            d30,d19,d18             @ Maj(a,b,c)
1044         veor            d16,d26                 @ Sigma0(a)
1045         vadd.i64        d20,d27
1046         vadd.i64        d30,d27
1047         @ vadd.i64      d16,d30
1048         mov             r12,#4
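        @ The remaining 64 rounds run as four passes (counted in r12) of
        @ the 16-round body at .L16_79_neon; each schedule step updates
        @ one q register, i.e. two 64-bit words at a time, using
        @ vshr/vsli pairs for the sigma rotations.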
1049 .L16_79_neon:
1050         subs            r12,#1
1051         vshr.u64        q12,q7,#19
1052         vshr.u64        q13,q7,#61
1053          vadd.i64       d16,d30                 @ h+=Maj from the past
1054         vshr.u64        q15,q7,#6
1055         vsli.64         q12,q7,#45
1056         vext.8          q14,q0,q1,#8    @ X[i+1]
1057         vsli.64         q13,q7,#3
1058         veor            q15,q12
1059         vshr.u64        q12,q14,#1
1060         veor            q15,q13                         @ sigma1(X[i+14])
1061         vshr.u64        q13,q14,#8
1062         vadd.i64        q0,q15
1063         vshr.u64        q15,q14,#7
1064         vsli.64         q12,q14,#63
1065         vsli.64         q13,q14,#56
1066         vext.8          q14,q4,q5,#8    @ X[i+9]
1067         veor            q15,q12
1068         vshr.u64        d24,d20,#14             @ from NEON_00_15
1069         vadd.i64        q0,q14
1070         vshr.u64        d25,d20,#18             @ from NEON_00_15
1071         veor            q15,q13                         @ sigma0(X[i+1])
1072         vshr.u64        d26,d20,#41             @ from NEON_00_15
1073         vadd.i64        q0,q15
1074         vld1.64         {d28},[r3,:64]! @ K[i++]
1075         vsli.64         d24,d20,#50
1076         vsli.64         d25,d20,#46
1077         vmov            d29,d20
1078         vsli.64         d26,d20,#23
1079 #if 16<16 && defined(__ARMEL__)
1080         vrev64.8        ,
1081 #endif
1082         veor            d25,d24
1083         vbsl            d29,d21,d22             @ Ch(e,f,g)
1084         vshr.u64        d24,d16,#28
1085         veor            d26,d25                 @ Sigma1(e)
1086         vadd.i64        d27,d29,d23
1087         vshr.u64        d25,d16,#34
1088         vsli.64         d24,d16,#36
1089         vadd.i64        d27,d26
1090         vshr.u64        d26,d16,#39
1091         vadd.i64        d28,d0
1092         vsli.64         d25,d16,#30
1093         veor            d30,d16,d17
1094         vsli.64         d26,d16,#25
1095         veor            d23,d24,d25
1096         vadd.i64        d27,d28
1097         vbsl            d30,d18,d17             @ Maj(a,b,c)
1098         veor            d23,d26                 @ Sigma0(a)
1099         vadd.i64        d19,d27
1100         vadd.i64        d30,d27
1101         @ vadd.i64      d23,d30
1102         vshr.u64        d24,d19,#14     @ 17
1103 #if 17<16
1104         vld1.64         {d1},[r1]!      @ handles unaligned
1105 #endif
1106         vshr.u64        d25,d19,#18
1107 #if 17>0
1108          vadd.i64       d23,d30                 @ h+=Maj from the past
1109 #endif
1110         vshr.u64        d26,d19,#41
1111         vld1.64         {d28},[r3,:64]! @ K[i++]
1112         vsli.64         d24,d19,#50
1113         vsli.64         d25,d19,#46
1114         vmov            d29,d19
1115         vsli.64         d26,d19,#23
1116 #if 17<16 && defined(__ARMEL__)
1117         vrev64.8        ,
1118 #endif
1119         veor            d25,d24
1120         vbsl            d29,d20,d21             @ Ch(e,f,g)
1121         vshr.u64        d24,d23,#28
1122         veor            d26,d25                 @ Sigma1(e)
1123         vadd.i64        d27,d29,d22
1124         vshr.u64        d25,d23,#34
1125         vsli.64         d24,d23,#36
1126         vadd.i64        d27,d26
1127         vshr.u64        d26,d23,#39
1128         vadd.i64        d28,d1
1129         vsli.64         d25,d23,#30
1130         veor            d30,d23,d16
1131         vsli.64         d26,d23,#25
1132         veor            d22,d24,d25
1133         vadd.i64        d27,d28
1134         vbsl            d30,d17,d16             @ Maj(a,b,c)
1135         veor            d22,d26                 @ Sigma0(a)
1136         vadd.i64        d18,d27
1137         vadd.i64        d30,d27
1138         @ vadd.i64      d22,d30
1139         vshr.u64        q12,q0,#19
1140         vshr.u64        q13,q0,#61
1141          vadd.i64       d22,d30                 @ h+=Maj from the past
1142         vshr.u64        q15,q0,#6
1143         vsli.64         q12,q0,#45
1144         vext.8          q14,q1,q2,#8    @ X[i+1]
1145         vsli.64         q13,q0,#3
1146         veor            q15,q12
1147         vshr.u64        q12,q14,#1
1148         veor            q15,q13                         @ sigma1(X[i+14])
1149         vshr.u64        q13,q14,#8
1150         vadd.i64        q1,q15
1151         vshr.u64        q15,q14,#7
1152         vsli.64         q12,q14,#63
1153         vsli.64         q13,q14,#56
1154         vext.8          q14,q5,q6,#8    @ X[i+9]
1155         veor            q15,q12
1156         vshr.u64        d24,d18,#14             @ from NEON_00_15
1157         vadd.i64        q1,q14
1158         vshr.u64        d25,d18,#18             @ from NEON_00_15
1159         veor            q15,q13                         @ sigma0(X[i+1])
1160         vshr.u64        d26,d18,#41             @ from NEON_00_15
1161         vadd.i64        q1,q15
1162         vld1.64         {d28},[r3,:64]! @ K[i++]
1163         vsli.64         d24,d18,#50
1164         vsli.64         d25,d18,#46
1165         vmov            d29,d18
1166         vsli.64         d26,d18,#23
1167 #if 18<16 && defined(__ARMEL__)
1168         vrev64.8        ,
1169 #endif
1170         veor            d25,d24
1171         vbsl            d29,d19,d20             @ Ch(e,f,g)
1172         vshr.u64        d24,d22,#28
1173         veor            d26,d25                 @ Sigma1(e)
1174         vadd.i64        d27,d29,d21
1175         vshr.u64        d25,d22,#34
1176         vsli.64         d24,d22,#36
1177         vadd.i64        d27,d26
1178         vshr.u64        d26,d22,#39
1179         vadd.i64        d28,d2
1180         vsli.64         d25,d22,#30
1181         veor            d30,d22,d23
1182         vsli.64         d26,d22,#25
1183         veor            d21,d24,d25
1184         vadd.i64        d27,d28
1185         vbsl            d30,d16,d23             @ Maj(a,b,c)
1186         veor            d21,d26                 @ Sigma0(a)
1187         vadd.i64        d17,d27
1188         vadd.i64        d30,d27
1189         @ vadd.i64      d21,d30
1190         vshr.u64        d24,d17,#14     @ 19
1191 #if 19<16
1192         vld1.64         {d3},[r1]!      @ handles unaligned
1193 #endif
1194         vshr.u64        d25,d17,#18
1195 #if 19>0
1196          vadd.i64       d21,d30                 @ h+=Maj from the past
1197 #endif
1198         vshr.u64        d26,d17,#41
1199         vld1.64         {d28},[r3,:64]! @ K[i++]
1200         vsli.64         d24,d17,#50
1201         vsli.64         d25,d17,#46
1202         vmov            d29,d17
1203         vsli.64         d26,d17,#23
1204 #if 19<16 && defined(__ARMEL__)
1205         vrev64.8        ,
1206 #endif
1207         veor            d25,d24
1208         vbsl            d29,d18,d19             @ Ch(e,f,g)
1209         vshr.u64        d24,d21,#28
1210         veor            d26,d25                 @ Sigma1(e)
1211         vadd.i64        d27,d29,d20
1212         vshr.u64        d25,d21,#34
1213         vsli.64         d24,d21,#36
1214         vadd.i64        d27,d26
1215         vshr.u64        d26,d21,#39
1216         vadd.i64        d28,d3
1217         vsli.64         d25,d21,#30
1218         veor            d30,d21,d22
1219         vsli.64         d26,d21,#25
1220         veor            d20,d24,d25
1221         vadd.i64        d27,d28
1222         vbsl            d30,d23,d22             @ Maj(a,b,c)
1223         veor            d20,d26                 @ Sigma0(a)
1224         vadd.i64        d16,d27
1225         vadd.i64        d30,d27
1226         @ vadd.i64      d20,d30
1227         vshr.u64        q12,q1,#19
1228         vshr.u64        q13,q1,#61
1229          vadd.i64       d20,d30                 @ h+=Maj from the past
1230         vshr.u64        q15,q1,#6
1231         vsli.64         q12,q1,#45
1232         vext.8          q14,q2,q3,#8    @ X[i+1]
1233         vsli.64         q13,q1,#3
1234         veor            q15,q12
1235         vshr.u64        q12,q14,#1
1236         veor            q15,q13                         @ sigma1(X[i+14])
1237         vshr.u64        q13,q14,#8
1238         vadd.i64        q2,q15
1239         vshr.u64        q15,q14,#7
1240         vsli.64         q12,q14,#63
1241         vsli.64         q13,q14,#56
1242         vext.8          q14,q6,q7,#8    @ X[i+9]
1243         veor            q15,q12
1244         vshr.u64        d24,d16,#14             @ from NEON_00_15
1245         vadd.i64        q2,q14
1246         vshr.u64        d25,d16,#18             @ from NEON_00_15
1247         veor            q15,q13                         @ sigma0(X[i+1])
1248         vshr.u64        d26,d16,#41             @ from NEON_00_15
1249         vadd.i64        q2,q15
1250         vld1.64         {d28},[r3,:64]! @ K[i++]
1251         vsli.64         d24,d16,#50
1252         vsli.64         d25,d16,#46
1253         vmov            d29,d16
1254         vsli.64         d26,d16,#23
1255 #if 20<16 && defined(__ARMEL__)
1256         vrev64.8        ,
1257 #endif
1258         veor            d25,d24
1259         vbsl            d29,d17,d18             @ Ch(e,f,g)
1260         vshr.u64        d24,d20,#28
1261         veor            d26,d25                 @ Sigma1(e)
1262         vadd.i64        d27,d29,d19
1263         vshr.u64        d25,d20,#34
1264         vsli.64         d24,d20,#36
1265         vadd.i64        d27,d26
1266         vshr.u64        d26,d20,#39
1267         vadd.i64        d28,d4
1268         vsli.64         d25,d20,#30
1269         veor            d30,d20,d21
1270         vsli.64         d26,d20,#25
1271         veor            d19,d24,d25
1272         vadd.i64        d27,d28
1273         vbsl            d30,d22,d21             @ Maj(a,b,c)
1274         veor            d19,d26                 @ Sigma0(a)
1275         vadd.i64        d23,d27
1276         vadd.i64        d30,d27
1277         @ vadd.i64      d19,d30
1278         vshr.u64        d24,d23,#14     @ 21
1279 #if 21<16
1280         vld1.64         {d5},[r1]!      @ handles unaligned
1281 #endif
1282         vshr.u64        d25,d23,#18
1283 #if 21>0
1284          vadd.i64       d19,d30                 @ h+=Maj from the past
1285 #endif
1286         vshr.u64        d26,d23,#41
1287         vld1.64         {d28},[r3,:64]! @ K[i++]
1288         vsli.64         d24,d23,#50
1289         vsli.64         d25,d23,#46
1290         vmov            d29,d23
1291         vsli.64         d26,d23,#23
1292 #if 21<16 && defined(__ARMEL__)
1293         vrev64.8        ,
1294 #endif
1295         veor            d25,d24
1296         vbsl            d29,d16,d17             @ Ch(e,f,g)
1297         vshr.u64        d24,d19,#28
1298         veor            d26,d25                 @ Sigma1(e)
1299         vadd.i64        d27,d29,d18
1300         vshr.u64        d25,d19,#34
1301         vsli.64         d24,d19,#36
1302         vadd.i64        d27,d26
1303         vshr.u64        d26,d19,#39
1304         vadd.i64        d28,d5
1305         vsli.64         d25,d19,#30
1306         veor            d30,d19,d20
1307         vsli.64         d26,d19,#25
1308         veor            d18,d24,d25
1309         vadd.i64        d27,d28
1310         vbsl            d30,d21,d20             @ Maj(a,b,c)
1311         veor            d18,d26                 @ Sigma0(a)
1312         vadd.i64        d22,d27
1313         vadd.i64        d30,d27
1314         @ vadd.i64      d18,d30
1315         vshr.u64        q12,q2,#19
1316         vshr.u64        q13,q2,#61
1317          vadd.i64       d18,d30                 @ h+=Maj from the past
1318         vshr.u64        q15,q2,#6
1319         vsli.64         q12,q2,#45
1320         vext.8          q14,q3,q4,#8    @ X[i+1]
1321         vsli.64         q13,q2,#3
1322         veor            q15,q12
1323         vshr.u64        q12,q14,#1
1324         veor            q15,q13                         @ sigma1(X[i+14])
1325         vshr.u64        q13,q14,#8
1326         vadd.i64        q3,q15
1327         vshr.u64        q15,q14,#7
1328         vsli.64         q12,q14,#63
1329         vsli.64         q13,q14,#56
1330         vext.8          q14,q7,q0,#8    @ X[i+9]
1331         veor            q15,q12
1332         vshr.u64        d24,d22,#14             @ from NEON_00_15
1333         vadd.i64        q3,q14
1334         vshr.u64        d25,d22,#18             @ from NEON_00_15
1335         veor            q15,q13                         @ sigma0(X[i+1])
1336         vshr.u64        d26,d22,#41             @ from NEON_00_15
1337         vadd.i64        q3,q15
1338         vld1.64         {d28},[r3,:64]! @ K[i++]
1339         vsli.64         d24,d22,#50
1340         vsli.64         d25,d22,#46
1341         vmov            d29,d22
1342         vsli.64         d26,d22,#23
1343 #if 22<16 && defined(__ARMEL__)
1344         vrev64.8        ,
1345 #endif
1346         veor            d25,d24
1347         vbsl            d29,d23,d16             @ Ch(e,f,g)
1348         vshr.u64        d24,d18,#28
1349         veor            d26,d25                 @ Sigma1(e)
1350         vadd.i64        d27,d29,d17
1351         vshr.u64        d25,d18,#34
1352         vsli.64         d24,d18,#36
1353         vadd.i64        d27,d26
1354         vshr.u64        d26,d18,#39
1355         vadd.i64        d28,d6
1356         vsli.64         d25,d18,#30
1357         veor            d30,d18,d19
1358         vsli.64         d26,d18,#25
1359         veor            d17,d24,d25
1360         vadd.i64        d27,d28
1361         vbsl            d30,d20,d19             @ Maj(a,b,c)
1362         veor            d17,d26                 @ Sigma0(a)
1363         vadd.i64        d21,d27
1364         vadd.i64        d30,d27
1365         @ vadd.i64      d17,d30
1366         vshr.u64        d24,d21,#14     @ 23
1367 #if 23<16
1368         vld1.64         {d7},[r1]!      @ handles unaligned
1369 #endif
1370         vshr.u64        d25,d21,#18
1371 #if 23>0
1372          vadd.i64       d17,d30                 @ h+=Maj from the past
1373 #endif
1374         vshr.u64        d26,d21,#41
1375         vld1.64         {d28},[r3,:64]! @ K[i++]
1376         vsli.64         d24,d21,#50
1377         vsli.64         d25,d21,#46
1378         vmov            d29,d21
1379         vsli.64         d26,d21,#23
1380 #if 23<16 && defined(__ARMEL__)
1381         vrev64.8        ,
1382 #endif
1383         veor            d25,d24
1384         vbsl            d29,d22,d23             @ Ch(e,f,g)
1385         vshr.u64        d24,d17,#28
1386         veor            d26,d25                 @ Sigma1(e)
1387         vadd.i64        d27,d29,d16
1388         vshr.u64        d25,d17,#34
1389         vsli.64         d24,d17,#36
1390         vadd.i64        d27,d26
1391         vshr.u64        d26,d17,#39
1392         vadd.i64        d28,d7
1393         vsli.64         d25,d17,#30
1394         veor            d30,d17,d18
1395         vsli.64         d26,d17,#25
1396         veor            d16,d24,d25
1397         vadd.i64        d27,d28
1398         vbsl            d30,d19,d18             @ Maj(a,b,c)
1399         veor            d16,d26                 @ Sigma0(a)
1400         vadd.i64        d20,d27
1401         vadd.i64        d30,d27
1402         @ vadd.i64      d16,d30
        vshr.u64        q12,q3,#19
        vshr.u64        q13,q3,#61
         vadd.i64       d16,d30                 @ h+=Maj from the past
        vshr.u64        q15,q3,#6
        vsli.64         q12,q3,#45
        vext.8          q14,q4,q5,#8    @ X[i+1]
        vsli.64         q13,q3,#3
        veor            q15,q12
        vshr.u64        q12,q14,#1
        veor            q15,q13                         @ sigma1(X[i+14])
        vshr.u64        q13,q14,#8
        vadd.i64        q4,q15
        vshr.u64        q15,q14,#7
        vsli.64         q12,q14,#63
        vsli.64         q13,q14,#56
        vext.8          q14,q0,q1,#8    @ X[i+9]
        veor            q15,q12
        vshr.u64        d24,d20,#14             @ from NEON_00_15
        vadd.i64        q4,q14
        vshr.u64        d25,d20,#18             @ from NEON_00_15
        veor            q15,q13                         @ sigma0(X[i+1])
        vshr.u64        d26,d20,#41             @ from NEON_00_15
        vadd.i64        q4,q15
        vld1.64         {d28},[r3,:64]! @ K[i++]
        vsli.64         d24,d20,#50
        vsli.64         d25,d20,#46
        vmov            d29,d20
        vsli.64         d26,d20,#23
#if 24<16 && defined(__ARMEL__)
        vrev64.8        ,
#endif
        veor            d25,d24
        vbsl            d29,d21,d22             @ Ch(e,f,g)
        vshr.u64        d24,d16,#28
        veor            d26,d25                 @ Sigma1(e)
        vadd.i64        d27,d29,d23
        vshr.u64        d25,d16,#34
        vsli.64         d24,d16,#36
        vadd.i64        d27,d26
        vshr.u64        d26,d16,#39
        vadd.i64        d28,d8
        vsli.64         d25,d16,#30
        veor            d30,d16,d17
        vsli.64         d26,d16,#25
        veor            d23,d24,d25
        vadd.i64        d27,d28
        vbsl            d30,d18,d17             @ Maj(a,b,c)
        veor            d23,d26                 @ Sigma0(a)
        vadd.i64        d19,d27
        vadd.i64        d30,d27
        @ vadd.i64      d23,d30
        vshr.u64        d24,d19,#14     @ 25
#if 25<16
        vld1.64         {d9},[r1]!      @ handles unaligned
#endif
        vshr.u64        d25,d19,#18
#if 25>0
         vadd.i64       d23,d30                 @ h+=Maj from the past
#endif
        vshr.u64        d26,d19,#41
        vld1.64         {d28},[r3,:64]! @ K[i++]
        vsli.64         d24,d19,#50
        vsli.64         d25,d19,#46
        vmov            d29,d19
        vsli.64         d26,d19,#23
#if 25<16 && defined(__ARMEL__)
        vrev64.8        ,
#endif
        veor            d25,d24
        vbsl            d29,d20,d21             @ Ch(e,f,g)
        vshr.u64        d24,d23,#28
        veor            d26,d25                 @ Sigma1(e)
        vadd.i64        d27,d29,d22
        vshr.u64        d25,d23,#34
        vsli.64         d24,d23,#36
        vadd.i64        d27,d26
        vshr.u64        d26,d23,#39
        vadd.i64        d28,d9
        vsli.64         d25,d23,#30
        veor            d30,d23,d16
        vsli.64         d26,d23,#25
        veor            d22,d24,d25
        vadd.i64        d27,d28
        vbsl            d30,d17,d16             @ Maj(a,b,c)
        veor            d22,d26                 @ Sigma0(a)
        vadd.i64        d18,d27
        vadd.i64        d30,d27
        @ vadd.i64      d22,d30
        vshr.u64        q12,q4,#19
        vshr.u64        q13,q4,#61
         vadd.i64       d22,d30                 @ h+=Maj from the past
        vshr.u64        q15,q4,#6
        vsli.64         q12,q4,#45
        vext.8          q14,q5,q6,#8    @ X[i+1]
        vsli.64         q13,q4,#3
        veor            q15,q12
        vshr.u64        q12,q14,#1
        veor            q15,q13                         @ sigma1(X[i+14])
        vshr.u64        q13,q14,#8
        vadd.i64        q5,q15
        vshr.u64        q15,q14,#7
        vsli.64         q12,q14,#63
        vsli.64         q13,q14,#56
        vext.8          q14,q1,q2,#8    @ X[i+9]
        veor            q15,q12
        vshr.u64        d24,d18,#14             @ from NEON_00_15
        vadd.i64        q5,q14
        vshr.u64        d25,d18,#18             @ from NEON_00_15
        veor            q15,q13                         @ sigma0(X[i+1])
        vshr.u64        d26,d18,#41             @ from NEON_00_15
        vadd.i64        q5,q15
        vld1.64         {d28},[r3,:64]! @ K[i++]
        vsli.64         d24,d18,#50
        vsli.64         d25,d18,#46
        vmov            d29,d18
        vsli.64         d26,d18,#23
#if 26<16 && defined(__ARMEL__)
        vrev64.8        ,
#endif
        veor            d25,d24
        vbsl            d29,d19,d20             @ Ch(e,f,g)
        vshr.u64        d24,d22,#28
        veor            d26,d25                 @ Sigma1(e)
        vadd.i64        d27,d29,d21
        vshr.u64        d25,d22,#34
        vsli.64         d24,d22,#36
        vadd.i64        d27,d26
        vshr.u64        d26,d22,#39
        vadd.i64        d28,d10
        vsli.64         d25,d22,#30
        veor            d30,d22,d23
        vsli.64         d26,d22,#25
        veor            d21,d24,d25
        vadd.i64        d27,d28
        vbsl            d30,d16,d23             @ Maj(a,b,c)
        veor            d21,d26                 @ Sigma0(a)
        vadd.i64        d17,d27
        vadd.i64        d30,d27
        @ vadd.i64      d21,d30
        vshr.u64        d24,d17,#14     @ 27
#if 27<16
        vld1.64         {d11},[r1]!     @ handles unaligned
#endif
        vshr.u64        d25,d17,#18
#if 27>0
         vadd.i64       d21,d30                 @ h+=Maj from the past
#endif
        vshr.u64        d26,d17,#41
        vld1.64         {d28},[r3,:64]! @ K[i++]
        vsli.64         d24,d17,#50
        vsli.64         d25,d17,#46
        vmov            d29,d17
        vsli.64         d26,d17,#23
#if 27<16 && defined(__ARMEL__)
        vrev64.8        ,
#endif
        veor            d25,d24
        vbsl            d29,d18,d19             @ Ch(e,f,g)
        vshr.u64        d24,d21,#28
        veor            d26,d25                 @ Sigma1(e)
        vadd.i64        d27,d29,d20
        vshr.u64        d25,d21,#34
        vsli.64         d24,d21,#36
        vadd.i64        d27,d26
        vshr.u64        d26,d21,#39
        vadd.i64        d28,d11
        vsli.64         d25,d21,#30
        veor            d30,d21,d22
        vsli.64         d26,d21,#25
        veor            d20,d24,d25
        vadd.i64        d27,d28
        vbsl            d30,d23,d22             @ Maj(a,b,c)
        veor            d20,d26                 @ Sigma0(a)
        vadd.i64        d16,d27
        vadd.i64        d30,d27
        @ vadd.i64      d20,d30
        vshr.u64        q12,q5,#19
        vshr.u64        q13,q5,#61
         vadd.i64       d20,d30                 @ h+=Maj from the past
        vshr.u64        q15,q5,#6
        vsli.64         q12,q5,#45
        vext.8          q14,q6,q7,#8    @ X[i+1]
        vsli.64         q13,q5,#3
        veor            q15,q12
        vshr.u64        q12,q14,#1
        veor            q15,q13                         @ sigma1(X[i+14])
        vshr.u64        q13,q14,#8
        vadd.i64        q6,q15
        vshr.u64        q15,q14,#7
        vsli.64         q12,q14,#63
        vsli.64         q13,q14,#56
        vext.8          q14,q2,q3,#8    @ X[i+9]
        veor            q15,q12
        vshr.u64        d24,d16,#14             @ from NEON_00_15
        vadd.i64        q6,q14
        vshr.u64        d25,d16,#18             @ from NEON_00_15
        veor            q15,q13                         @ sigma0(X[i+1])
        vshr.u64        d26,d16,#41             @ from NEON_00_15
        vadd.i64        q6,q15
        vld1.64         {d28},[r3,:64]! @ K[i++]
        vsli.64         d24,d16,#50
        vsli.64         d25,d16,#46
        vmov            d29,d16
        vsli.64         d26,d16,#23
#if 28<16 && defined(__ARMEL__)
        vrev64.8        ,
#endif
        veor            d25,d24
        vbsl            d29,d17,d18             @ Ch(e,f,g)
        vshr.u64        d24,d20,#28
        veor            d26,d25                 @ Sigma1(e)
        vadd.i64        d27,d29,d19
        vshr.u64        d25,d20,#34
        vsli.64         d24,d20,#36
        vadd.i64        d27,d26
        vshr.u64        d26,d20,#39
        vadd.i64        d28,d12
        vsli.64         d25,d20,#30
        veor            d30,d20,d21
        vsli.64         d26,d20,#25
        veor            d19,d24,d25
        vadd.i64        d27,d28
        vbsl            d30,d22,d21             @ Maj(a,b,c)
        veor            d19,d26                 @ Sigma0(a)
        vadd.i64        d23,d27
        vadd.i64        d30,d27
        @ vadd.i64      d19,d30
        vshr.u64        d24,d23,#14     @ 29
#if 29<16
        vld1.64         {d13},[r1]!     @ handles unaligned
#endif
        vshr.u64        d25,d23,#18
#if 29>0
         vadd.i64       d19,d30                 @ h+=Maj from the past
#endif
        vshr.u64        d26,d23,#41
        vld1.64         {d28},[r3,:64]! @ K[i++]
        vsli.64         d24,d23,#50
        vsli.64         d25,d23,#46
        vmov            d29,d23
        vsli.64         d26,d23,#23
#if 29<16 && defined(__ARMEL__)
        vrev64.8        ,
#endif
        veor            d25,d24
        vbsl            d29,d16,d17             @ Ch(e,f,g)
        vshr.u64        d24,d19,#28
        veor            d26,d25                 @ Sigma1(e)
        vadd.i64        d27,d29,d18
        vshr.u64        d25,d19,#34
        vsli.64         d24,d19,#36
        vadd.i64        d27,d26
        vshr.u64        d26,d19,#39
        vadd.i64        d28,d13
        vsli.64         d25,d19,#30
        veor            d30,d19,d20
        vsli.64         d26,d19,#25
        veor            d18,d24,d25
        vadd.i64        d27,d28
        vbsl            d30,d21,d20             @ Maj(a,b,c)
        veor            d18,d26                 @ Sigma0(a)
        vadd.i64        d22,d27
        vadd.i64        d30,d27
        @ vadd.i64      d18,d30
        vshr.u64        q12,q6,#19
        vshr.u64        q13,q6,#61
         vadd.i64       d18,d30                 @ h+=Maj from the past
        vshr.u64        q15,q6,#6
        vsli.64         q12,q6,#45
        vext.8          q14,q7,q0,#8    @ X[i+1]
        vsli.64         q13,q6,#3
        veor            q15,q12
        vshr.u64        q12,q14,#1
        veor            q15,q13                         @ sigma1(X[i+14])
        vshr.u64        q13,q14,#8
        vadd.i64        q7,q15
        vshr.u64        q15,q14,#7
        vsli.64         q12,q14,#63
        vsli.64         q13,q14,#56
        vext.8          q14,q3,q4,#8    @ X[i+9]
        veor            q15,q12
        vshr.u64        d24,d22,#14             @ from NEON_00_15
        vadd.i64        q7,q14
        vshr.u64        d25,d22,#18             @ from NEON_00_15
        veor            q15,q13                         @ sigma0(X[i+1])
        vshr.u64        d26,d22,#41             @ from NEON_00_15
        vadd.i64        q7,q15
        vld1.64         {d28},[r3,:64]! @ K[i++]
        vsli.64         d24,d22,#50
        vsli.64         d25,d22,#46
        vmov            d29,d22
        vsli.64         d26,d22,#23
#if 30<16 && defined(__ARMEL__)
        vrev64.8        ,
#endif
        veor            d25,d24
        vbsl            d29,d23,d16             @ Ch(e,f,g)
        vshr.u64        d24,d18,#28
        veor            d26,d25                 @ Sigma1(e)
        vadd.i64        d27,d29,d17
        vshr.u64        d25,d18,#34
        vsli.64         d24,d18,#36
        vadd.i64        d27,d26
        vshr.u64        d26,d18,#39
        vadd.i64        d28,d14
        vsli.64         d25,d18,#30
        veor            d30,d18,d19
        vsli.64         d26,d18,#25
        veor            d17,d24,d25
        vadd.i64        d27,d28
        vbsl            d30,d20,d19             @ Maj(a,b,c)
        veor            d17,d26                 @ Sigma0(a)
        vadd.i64        d21,d27
        vadd.i64        d30,d27
        @ vadd.i64      d17,d30
        vshr.u64        d24,d21,#14     @ 31
#if 31<16
        vld1.64         {d15},[r1]!     @ handles unaligned
#endif
        vshr.u64        d25,d21,#18
#if 31>0
         vadd.i64       d17,d30                 @ h+=Maj from the past
#endif
        vshr.u64        d26,d21,#41
        vld1.64         {d28},[r3,:64]! @ K[i++]
        vsli.64         d24,d21,#50
        vsli.64         d25,d21,#46
        vmov            d29,d21
        vsli.64         d26,d21,#23
#if 31<16 && defined(__ARMEL__)
        vrev64.8        ,
#endif
        veor            d25,d24
        vbsl            d29,d22,d23             @ Ch(e,f,g)
        vshr.u64        d24,d17,#28
        veor            d26,d25                 @ Sigma1(e)
        vadd.i64        d27,d29,d16
        vshr.u64        d25,d17,#34
        vsli.64         d24,d17,#36
        vadd.i64        d27,d26
        vshr.u64        d26,d17,#39
        vadd.i64        d28,d15
        vsli.64         d25,d17,#30
        veor            d30,d17,d18
        vsli.64         d26,d17,#25
        veor            d16,d24,d25
        vadd.i64        d27,d28
        vbsl            d30,d19,d18             @ Maj(a,b,c)
        veor            d16,d26                 @ Sigma0(a)
        vadd.i64        d20,d27
        vadd.i64        d30,d27
        @ vadd.i64      d16,d30
        bne             .L16_79_neon
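
@ After the last of the 80 rounds: fold in the final deferred Maj,
@ reload the previous hash value, add the working variables into it,
@ store the updated context, and loop while input remains (r1 != r2),
@ rewinding r3 by 80*8 bytes to the start of K512 for the next
@ 128-byte block.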
         vadd.i64       d16,d30         @ h+=Maj from the past
        vldmia          r0,{d24-d31}    @ load context to temp
        vadd.i64        q8,q12          @ vectorized accumulate
        vadd.i64        q9,q13
        vadd.i64        q10,q14
        vadd.i64        q11,q15
        vstmia          r0,{d16-d23}    @ save context
        teq             r1,r2
        sub             r3,#640 @ rewind K512
        bne             .Loop_neon

        vldmia  sp!,{d8-d15}            @ epilogue
        RET                             @ .word 0xe12fff1e
#endif
.size   sha512_block_data_order,.-sha512_block_data_order
.asciz  "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align  2
#if __ARM_MAX_ARCH__>=7
.comm   OPENSSL_armcap_P,4,4
#endif