@ third_party/boringssl/linux-arm/crypto/bn/armv4-mont.S
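@
@ Montgomery multiplication for ARMv4, with an optional NEON path, as
@ produced by the CRYPTOGAMS perl script (see the .asciz banner at the
@ end of this file).
@
@ Annotation (matches the usual OpenSSL/BoringSSL bn_mul_mont contract):
@ bn_mul_mont computes rp[] = ap[]*bp[]*R^-1 mod np[] on 32-bit words,
@ where R = 2^(32*num) and n0 = -np[0]^-1 mod 2^32 is precomputed by the
@ caller.  The argument block is r0=rp, r1=ap, r2=bp, r3=np, [sp]=&n0,
@ [sp,#4]=num; the routine returns non-zero on success and 0 when num < 2.
@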
#include "arm_arch.h"

.text
.code	32

#if __ARM_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-bn_mul_mont
#endif

.global	bn_mul_mont
.hidden	bn_mul_mont
.type	bn_mul_mont,%function

.align	5
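@ Entry: on ARMv7, probe OPENSSL_armcap_P.  If NEON is advertised and num
@ is a multiple of 8 (tst ip,#7), drop the two pushed arguments and jump
@ to bn_mul8x_mont_neon; otherwise fall through to the word-serial ALU
@ code at .Lialu.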
bn_mul_mont:
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
#if __ARM_ARCH__>=7
	tst	ip,#7
	bne	.Lialu
	adr	r0,bn_mul_mont
	ldr	r2,.LOPENSSL_armcap
	ldr	r0,[r0,r2]
	tst	r0,#1			@ NEON available?
	ldmia	sp, {r0,r2}
	beq	.Lialu
	add	sp,sp,#8
	b	bn_mul8x_mont_neon
.align	4
.Lialu:
#endif
	cmp	ip,#2
	mov	r0,ip			@ load num
	movlt	r0,#0
	addlt	sp,sp,#2*4
	blt	.Labrt

	stmdb	sp!,{r4-r12,lr}		@ save 10 registers

	mov	r0,r0,lsl#2		@ rescale r0 for byte count
	sub	sp,sp,r0		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	r0,r0,#4		@ "num=num-1"
	add	r4,r2,r0		@ &bp[num-1]

	add	r0,sp,r0		@ r0 to point at &tp[num-1]
	ldr	r8,[r0,#14*4]		@ &n0
	ldr	r2,[r2]			@ bp[0]
	ldr	r5,[r1],#4		@ ap[0],ap++
	ldr	r6,[r3],#4		@ np[0],np++
	ldr	r8,[r8]			@ *n0
	str	r4,[r0,#15*4]		@ save &bp[num]

	umull	r10,r11,r5,r2		@ ap[0]*bp[0]
	str	r8,[r0,#14*4]		@ save n0 value
	mul	r8,r10,r8		@ "tp[0]"*n0
	mov	r12,#0
	umlal	r10,r12,r6,r8		@ np[0]*n0+"t[0]"
	mov	r4,sp
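
@ First pass (i=0): compute tp[] = ap[]*bp[0] + m*np[] with
@ m = ap[0]*bp[0]*n0 mod 2^32, so the least significant word of the sum
@ is zero by construction and is simply not stored.  r10/r11 accumulate
@ the ap*bp chain and r12/r14 the np*m chain.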
.L1st:
	ldr	r5,[r1],#4		@ ap[j],ap++
	mov	r10,r11
	ldr	r6,[r3],#4		@ np[j],np++
	mov	r11,#0
	umlal	r10,r11,r5,r2		@ ap[j]*bp[0]
	mov	r14,#0
	umlal	r12,r14,r6,r8		@ np[j]*n0
	adds	r12,r12,r10
	str	r12,[r4],#4		@ tp[j-1]=,tp++
	adc	r12,r14,#0
	cmp	r4,r0
	bne	.L1st

	adds	r12,r12,r11
	ldr	r4,[r0,#13*4]		@ restore bp
	mov	r14,#0
	ldr	r8,[r0,#14*4]		@ restore n0
	adc	r14,r14,#0
	str	r12,[r0]		@ tp[num-1]=
	str	r14,[r0,#4]		@ tp[num]=
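
@ Outer loop over the remaining words of bp: each pass folds
@ ap[]*bp[i] + m*np[] into tp[] and shifts it down one word, with
@ m = (tp[0] + ap[0]*bp[i])*n0 mod 2^32.  bp, n0 and &bp[num] live in
@ the scratch words above tp[] (offsets #13*4..#15*4 from r0).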
.Louter:
	sub	r7,r0,sp		@ "original" r0-1 value
	sub	r1,r1,r7		@ "rewind" ap to &ap[1]
	ldr	r2,[r4,#4]!		@ *(++bp)
	sub	r3,r3,r7		@ "rewind" np to &np[1]
	ldr	r5,[r1,#-4]		@ ap[0]
	ldr	r10,[sp]		@ tp[0]
	ldr	r6,[r3,#-4]		@ np[0]
	ldr	r7,[sp,#4]		@ tp[1]

	mov	r11,#0
	umlal	r10,r11,r5,r2		@ ap[0]*bp[i]+tp[0]
	str	r4,[r0,#13*4]		@ save bp
	mul	r8,r10,r8
	mov	r12,#0
	umlal	r10,r12,r6,r8		@ np[0]*n0+"tp[0]"
	mov	r4,sp
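
@ Inner loop: same two multiply-accumulate chains as .L1st, plus the
@ previous tp[j] (carried in r7, prefetched one iteration ahead).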
.Linner:
	ldr	r5,[r1],#4		@ ap[j],ap++
	adds	r10,r11,r7		@ +=tp[j]
	ldr	r6,[r3],#4		@ np[j],np++
	mov	r11,#0
	umlal	r10,r11,r5,r2		@ ap[j]*bp[i]
	mov	r14,#0
	umlal	r12,r14,r6,r8		@ np[j]*n0
	adc	r11,r11,#0
	ldr	r7,[r4,#8]		@ tp[j+1]
	adds	r12,r12,r10
	str	r12,[r4],#4		@ tp[j-1]=,tp++
	adc	r12,r14,#0
	cmp	r4,r0
	bne	.Linner

	adds	r12,r12,r11
	mov	r14,#0
	ldr	r4,[r0,#13*4]		@ restore bp
	adc	r14,r14,#0
	ldr	r8,[r0,#14*4]		@ restore n0
	adds	r12,r12,r7
	ldr	r7,[r0,#15*4]		@ restore &bp[num]
	adc	r14,r14,#0
	str	r12,[r0]		@ tp[num-1]=
	str	r14,[r0,#4]		@ tp[num]=

	cmp	r4,r7
	bne	.Louter
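
@ tp[] now holds a value less than 2*np[].  Subtract np[] unconditionally
@ into rp[], then use the final borrow to select either tp[] or the
@ difference, and wipe tp[] during the copy, avoiding a data-dependent
@ branch.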
	ldr	r2,[r0,#12*4]		@ pull rp
	add	r0,r0,#4		@ r0 to point at &tp[num]
	sub	r5,r0,sp		@ "original" num value
	mov	r4,sp			@ "rewind" r4
	mov	r1,r4			@ "borrow" r1
	sub	r3,r3,r5		@ "rewind" r3 to &np[0]

	subs	r7,r7,r7		@ "clear" carry flag
.Lsub:	ldr	r7,[r4],#4
	ldr	r6,[r3],#4
	sbcs	r7,r7,r6		@ tp[j]-np[j]
	str	r7,[r2],#4		@ rp[j]=
	teq	r4,r0			@ preserve carry
	bne	.Lsub
	sbcs	r14,r14,#0		@ upmost carry
	mov	r4,sp			@ "rewind" r4
	sub	r2,r2,r5		@ "rewind" r2

	and	r1,r4,r14
	bic	r3,r2,r14
	orr	r1,r1,r3		@ ap=borrow?tp:rp

.Lcopy:	ldr	r7,[r1],#4		@ copy or in-place refresh
	str	sp,[r4],#4		@ zap tp
	str	r7,[r2],#4
	cmp	r4,r0
	bne	.Lcopy

	add	sp,r0,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	mov	r0,#1
.Labrt:	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	.word	0xe12fff1e		@ interoperable with Thumb ISA:-)
.size	bn_mul_mont,.-bn_mul_mont
#if __ARM_ARCH__>=7
.fpu	neon

.type	bn_mul8x_mont_neon,%function
.align	5
bn_mul8x_mont_neon:
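@ NEON path, entered only when num is a multiple of 8.  bp[i] and the
@ Montgomery factor are split into 16-bit halves (vzip.16 with a zeroed
@ d8), so each vmlal.u32 accumulates 32x16-bit partial products into
@ 64-bit lanes and inter-digit carries can be postponed until
@ .LNEON_tail.  q6-q13 are the eight accumulators, d28 holds the current
@ bp word, d29 the current factor, d30 holds n0.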
	mov	ip,sp
	stmdb	sp!,{r4-r11}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldmia	ip,{r4-r5}		@ load rest of parameter block

	sub	r7,sp,#16
	vld1.32	{d28[0]}, [r2,:32]!
	sub	r7,r7,r5,lsl#4
	vld1.32	{d0-d3},  [r1]!		@ can't specify :32 :-(
	and	r7,r7,#-64
	vld1.32	{d30[0]}, [r4,:32]
	mov	sp,r7			@ alloca
	veor	d8,d8,d8
	subs	r8,r5,#8
	vzip.16	d28,d8

	vmull.u32	q6,d28,d0[0]
	vmull.u32	q7,d28,d0[1]
	vmull.u32	q8,d28,d1[0]
	vshl.i64	d10,d13,#16
	vmull.u32	q9,d28,d1[1]

	vadd.u64	d10,d10,d12
	veor		d8,d8,d8
	vmul.u32	d29,d10,d30

	vmull.u32	q10,d28,d2[0]
	 vld1.32	{d4-d7}, [r3]!
	vmull.u32	q11,d28,d2[1]
	vmull.u32	q12,d28,d3[0]
	vzip.16		d29,d8
	vmull.u32	q13,d28,d3[1]

	bne	.LNEON_1st

	@ special case for num=8, everything is in register bank...

	vmlal.u32	q6,d29,d4[0]
	sub		r9,r5,#1
	vmlal.u32	q7,d29,d4[1]
	vmlal.u32	q8,d29,d5[0]
	vmlal.u32	q9,d29,d5[1]

	vmlal.u32	q10,d29,d6[0]
	vmov		q5,q6
	vmlal.u32	q11,d29,d6[1]
	vmov		q6,q7
	vmlal.u32	q12,d29,d7[0]
	vmov		q7,q8
	vmlal.u32	q13,d29,d7[1]
	vmov		q8,q9
	vmov		q9,q10
	vshr.u64	d10,d10,#16
	vmov		q10,q11
	vmov		q11,q12
	vadd.u64	d10,d10,d11
	vmov		q12,q13
	veor		q13,q13
	vshr.u64	d10,d10,#16

	b	.LNEON_outer8

.align	4
.LNEON_outer8:
	vld1.32		{d28[0]}, [r2,:32]!
	veor		d8,d8,d8
	vzip.16		d28,d8
	vadd.u64	d12,d12,d10

	vmlal.u32	q6,d28,d0[0]
	vmlal.u32	q7,d28,d0[1]
	vmlal.u32	q8,d28,d1[0]
	vshl.i64	d10,d13,#16
	vmlal.u32	q9,d28,d1[1]

	vadd.u64	d10,d10,d12
	veor		d8,d8,d8
	subs		r9,r9,#1
	vmul.u32	d29,d10,d30

	vmlal.u32	q10,d28,d2[0]
	vmlal.u32	q11,d28,d2[1]
	vmlal.u32	q12,d28,d3[0]
	vzip.16		d29,d8
	vmlal.u32	q13,d28,d3[1]

	vmlal.u32	q6,d29,d4[0]
	vmlal.u32	q7,d29,d4[1]
	vmlal.u32	q8,d29,d5[0]
	vmlal.u32	q9,d29,d5[1]

	vmlal.u32	q10,d29,d6[0]
	vmov		q5,q6
	vmlal.u32	q11,d29,d6[1]
	vmov		q6,q7
	vmlal.u32	q12,d29,d7[0]
	vmov		q7,q8
	vmlal.u32	q13,d29,d7[1]
	vmov		q8,q9
	vmov		q9,q10
	vshr.u64	d10,d10,#16
	vmov		q10,q11
	vmov		q11,q12
	vadd.u64	d10,d10,d11
	vmov		q12,q13
	veor		q13,q13
	vshr.u64	d10,d10,#16

	bne	.LNEON_outer8

	vadd.u64	d12,d12,d10
	mov		r7,sp
	vshr.u64	d10,d12,#16
	mov		r8,r5
	vadd.u64	d13,d13,d10
	add		r6,sp,#16
	vshr.u64	d10,d13,#16
	vzip.16		d12,d13

	b	.LNEON_tail2

.align	4
.LNEON_1st:
	vmlal.u32	q6,d29,d4[0]
	 vld1.32	{d0-d3}, [r1]!
	vmlal.u32	q7,d29,d4[1]
	subs		r8,r8,#8
	vmlal.u32	q8,d29,d5[0]
	vmlal.u32	q9,d29,d5[1]

	vmlal.u32	q10,d29,d6[0]
	 vld1.32	{d4-d5}, [r3]!
	vmlal.u32	q11,d29,d6[1]
	 vst1.64	{q6-q7}, [r7,:256]!
	vmlal.u32	q12,d29,d7[0]
	vmlal.u32	q13,d29,d7[1]
	 vst1.64	{q8-q9}, [r7,:256]!

	vmull.u32	q6,d28,d0[0]
	 vld1.32	{d6-d7}, [r3]!
	vmull.u32	q7,d28,d0[1]
	 vst1.64	{q10-q11}, [r7,:256]!
	vmull.u32	q8,d28,d1[0]
	vmull.u32	q9,d28,d1[1]
	 vst1.64	{q12-q13}, [r7,:256]!

	vmull.u32	q10,d28,d2[0]
	vmull.u32	q11,d28,d2[1]
	vmull.u32	q12,d28,d3[0]
	vmull.u32	q13,d28,d3[1]

	bne	.LNEON_1st

	vmlal.u32	q6,d29,d4[0]
	add		r6,sp,#16
	vmlal.u32	q7,d29,d4[1]
	sub		r1,r1,r5,lsl#2		@ rewind r1
	vmlal.u32	q8,d29,d5[0]
	 vld1.64	{q5}, [sp,:128]
	vmlal.u32	q9,d29,d5[1]
	sub		r9,r5,#1

	vmlal.u32	q10,d29,d6[0]
	vst1.64		{q6-q7}, [r7,:256]!
	vmlal.u32	q11,d29,d6[1]
	vshr.u64	d10,d10,#16
	 vld1.64	{q6}, [r6, :128]!
	vmlal.u32	q12,d29,d7[0]
	vst1.64		{q8-q9}, [r7,:256]!
	vmlal.u32	q13,d29,d7[1]

	vst1.64		{q10-q11}, [r7,:256]!
	vadd.u64	d10,d10,d11
	veor		q4,q4,q4
	vst1.64		{q12-q13}, [r7,:256]!
	 vld1.64	{q7-q8}, [r6, :256]!
	vst1.64		{q4}, [r7,:128]
	vshr.u64	d10,d10,#16

	b		.LNEON_outer
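
@ General outer loop (num > 8): the accumulators are streamed through the
@ stack frame, r7 writing the updated tp[] while r6 reads back the
@ previous pass's values one block behind.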
.align	4
.LNEON_outer:
	vld1.32		{d28[0]}, [r2,:32]!
	sub		r3,r3,r5,lsl#2		@ rewind r3
	vld1.32		{d0-d3},  [r1]!
	veor		d8,d8,d8
	mov		r7,sp
	vzip.16		d28,d8
	sub		r8,r5,#8
	vadd.u64	d12,d12,d10

	vmlal.u32	q6,d28,d0[0]
	 vld1.64	{q9-q10},[r6,:256]!
	vmlal.u32	q7,d28,d0[1]
	vmlal.u32	q8,d28,d1[0]
	 vld1.64	{q11-q12},[r6,:256]!
	vmlal.u32	q9,d28,d1[1]

	vshl.i64	d10,d13,#16
	veor		d8,d8,d8
	vadd.u64	d10,d10,d12
	 vld1.64	{q13},[r6,:128]!
	vmul.u32	d29,d10,d30

	vmlal.u32	q10,d28,d2[0]
	 vld1.32	{d4-d7}, [r3]!
	vmlal.u32	q11,d28,d2[1]
	vmlal.u32	q12,d28,d3[0]
	vzip.16		d29,d8
	vmlal.u32	q13,d28,d3[1]

.LNEON_inner:
	vmlal.u32	q6,d29,d4[0]
	 vld1.32	{d0-d3}, [r1]!
	vmlal.u32	q7,d29,d4[1]
	 subs		r8,r8,#8
	vmlal.u32	q8,d29,d5[0]
	vmlal.u32	q9,d29,d5[1]
	vst1.64		{q6-q7}, [r7,:256]!

	vmlal.u32	q10,d29,d6[0]
	 vld1.64	{q6}, [r6, :128]!
	vmlal.u32	q11,d29,d6[1]
	vst1.64		{q8-q9}, [r7,:256]!
	vmlal.u32	q12,d29,d7[0]
	 vld1.64	{q7-q8}, [r6, :256]!
	vmlal.u32	q13,d29,d7[1]
	vst1.64		{q10-q11}, [r7,:256]!

	vmlal.u32	q6,d28,d0[0]
	 vld1.64	{q9-q10}, [r6, :256]!
	vmlal.u32	q7,d28,d0[1]
	vst1.64		{q12-q13}, [r7,:256]!
	vmlal.u32	q8,d28,d1[0]
	 vld1.64	{q11-q12}, [r6, :256]!
	vmlal.u32	q9,d28,d1[1]
	 vld1.32	{d4-d7}, [r3]!

	vmlal.u32	q10,d28,d2[0]
	 vld1.64	{q13}, [r6, :128]!
	vmlal.u32	q11,d28,d2[1]
	vmlal.u32	q12,d28,d3[0]
	vmlal.u32	q13,d28,d3[1]

	bne	.LNEON_inner

	vmlal.u32	q6,d29,d4[0]
	add		r6,sp,#16
	vmlal.u32	q7,d29,d4[1]
	sub		r1,r1,r5,lsl#2		@ rewind r1
	vmlal.u32	q8,d29,d5[0]
	 vld1.64	{q5}, [sp,:128]
	vmlal.u32	q9,d29,d5[1]
	subs		r9,r9,#1

	vmlal.u32	q10,d29,d6[0]
	vst1.64		{q6-q7}, [r7,:256]!
	vmlal.u32	q11,d29,d6[1]
	 vld1.64	{q6}, [r6, :128]!
	vshr.u64	d10,d10,#16
	vst1.64		{q8-q9}, [r7,:256]!
	vmlal.u32	q12,d29,d7[0]
	 vld1.64	{q7-q8}, [r6, :256]!
	vmlal.u32	q13,d29,d7[1]

	vst1.64		{q10-q11}, [r7,:256]!
	vadd.u64	d10,d10,d11
	vst1.64		{q12-q13}, [r7,:256]!
	vshr.u64	d10,d10,#16

	bne	.LNEON_outer

	mov		r7,sp
	mov		r8,r5
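
@ Carry propagation over the redundant 16-bit digits: each 64-bit lane's
@ overflow is folded into the next digit (vshr.u64 #16 / vadd.u64), then
@ vzip.16 re-packs two digit halves into a 32-bit word that is stored
@ out through r7.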
.LNEON_tail:
	vadd.u64	d12,d12,d10
	vld1.64		{q9-q10}, [r6, :256]!
	vshr.u64	d10,d12,#16
	vadd.u64	d13,d13,d10
	vld1.64		{q11-q12}, [r6, :256]!
	vshr.u64	d10,d13,#16
	vld1.64		{q13}, [r6, :128]!
	vzip.16		d12,d13

.LNEON_tail2:
	vadd.u64	d14,d14,d10
	vst1.32		{d12[0]}, [r7, :32]!
	vshr.u64	d10,d14,#16
	vadd.u64	d15,d15,d10
	vshr.u64	d10,d15,#16
	vzip.16		d14,d15

	vadd.u64	d16,d16,d10
	vst1.32		{d14[0]}, [r7, :32]!
	vshr.u64	d10,d16,#16
	vadd.u64	d17,d17,d10
	vshr.u64	d10,d17,#16
	vzip.16		d16,d17

	vadd.u64	d18,d18,d10
	vst1.32		{d16[0]}, [r7, :32]!
	vshr.u64	d10,d18,#16
	vadd.u64	d19,d19,d10
	vshr.u64	d10,d19,#16
	vzip.16		d18,d19

	vadd.u64	d20,d20,d10
	vst1.32		{d18[0]}, [r7, :32]!
	vshr.u64	d10,d20,#16
	vadd.u64	d21,d21,d10
	vshr.u64	d10,d21,#16
	vzip.16		d20,d21

	vadd.u64	d22,d22,d10
	vst1.32		{d20[0]}, [r7, :32]!
	vshr.u64	d10,d22,#16
	vadd.u64	d23,d23,d10
	vshr.u64	d10,d23,#16
	vzip.16		d22,d23

	vadd.u64	d24,d24,d10
	vst1.32		{d22[0]}, [r7, :32]!
	vshr.u64	d10,d24,#16
	vadd.u64	d25,d25,d10
	vld1.64		{q6}, [r6, :128]!
	vshr.u64	d10,d25,#16
	vzip.16		d24,d25

	vadd.u64	d26,d26,d10
	vst1.32		{d24[0]}, [r7, :32]!
	vshr.u64	d10,d26,#16
	vadd.u64	d27,d27,d10
	vld1.64		{q7-q8}, [r6, :256]!
	vshr.u64	d10,d27,#16
	vzip.16		d26,d27
	subs		r8,r8,#8
	vst1.32		{d26[0]}, [r7, :32]!

	bne	.LNEON_tail

	vst1.32	{d10[0]}, [r7, :32]		@ top-most bit
	sub	r3,r3,r5,lsl#2			@ rewind r3
	subs	r1,sp,#0			@ clear carry flag
	add	r2,sp,r5,lsl#2
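
@ Conditional subtraction, NEON variant of .Lsub: compute
@ rp[] = tp[] - np[] four words at a time; the final borrow (folded with
@ the top-most bit below) selects the result in .LNEON_copy_n_zap.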
.LNEON_sub:
	ldmia	r1!, {r4-r7}
	ldmia	r3!, {r8-r11}
	sbcs	r8, r4,r8
	sbcs	r9, r5,r9
	sbcs	r10,r6,r10
	sbcs	r11,r7,r11
	teq	r1,r2				@ preserves carry
	stmia	r0!, {r8-r11}
	bne	.LNEON_sub

	ldr	r10, [r1]			@ load top-most bit
	veor	q0,q0,q0
	sub	r11,r2,sp			@ this is num*4
	veor	q1,q1,q1
	mov	r1,sp
	sub	r0,r0,r11			@ rewind r0
	mov	r3,r2				@ second 3/4th of frame
	sbcs	r10,r10,#0			@ result is carry flag
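
@ Select and wipe: with carry clear (a borrow occurred), movcc replaces
@ the subtracted words already in rp[] with the original tp[] words,
@ while stores of the zeroed q0-q1 erase the temporary frame.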
.LNEON_copy_n_zap:
	ldmia	r1!, {r4-r7}
	ldmia	r0,  {r8-r11}
	movcc	r8, r4
	vst1.64	{q0-q1}, [r3,:256]!		@ wipe
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [r3,:256]!		@ wipe
	movcc	r11,r7
	ldmia	r1, {r4-r7}
	stmia	r0!, {r8-r11}
	sub	r1,r1,#16
	ldmia	r0, {r8-r11}
	movcc	r8, r4
	vst1.64	{q0-q1}, [r1,:256]!		@ wipe
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [r3,:256]!		@ wipe
	movcc	r11,r7
	teq	r1,r2				@ preserves carry
	stmia	r0!, {r8-r11}
	bne	.LNEON_copy_n_zap

	sub	sp,ip,#96
	vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r11}
	.word	0xe12fff1e			@ bx lr
.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align	2
#if __ARM_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
.hidden	OPENSSL_armcap_P
#endif