@ crypto/external/bsd/openssl/lib/libcrypto/arch/arm/ghash-armv4.S
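@
@ GHASH for ARMv4/NEON, CRYPTOGAMS output (see the .asciz tag at the end
@ of the file).  Two implementations are provided: a 4-bit table-driven
@ version (gcm_gmult_4bit/gcm_ghash_4bit) for plain ARMv4+, and a NEON
@ version (gcm_init_neon/gcm_gmult_neon/gcm_ghash_neon) that builds a
@ 64x64-bit carry-less multiply out of vmull.p8.  The "@" comment blocks
@ added before the routines below are annotations describing the assumed
@ calling conventions; they are not part of the generated code.
@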
#include "arm_arch.h"
#include "arm_asm.h"

.syntax unified
.text
.code   32
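
@ rem_4bit: the 16 reduction constants used by the 4-bit table-driven
@ GHASH below.  When Xi is shifted right by one nibble, the nibble
@ shifted out is folded back in as rem_4bit[nibble]; the constants are
@ stored as halfwords and applied with "eor ...,lsl#16" against the most
@ significant word of Xi (i.e. shifted into the top 16 bits of Xi).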
.type   rem_4bit,%object
.align  5
rem_4bit:
.short  0x0000,0x1C20,0x3840,0x2460
.short  0x7080,0x6CA0,0x48C0,0x54E0
.short  0xE100,0xFD20,0xD940,0xC560
.short  0x9180,0x8DA0,0xA9C0,0xB5E0
.size   rem_4bit,.-rem_4bit
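
@ rem_4bit_get: helper used by gcm_gmult_4bit.  It computes &rem_4bit in
@ r2 with a PC-relative offset (pc-8 is the address of rem_4bit_get
@ itself; the 32-byte table immediately precedes it) and branches back
@ into gcm_gmult_4bit at .Lrem_4bit_got.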
.type   rem_4bit_get,%function
rem_4bit_get:
        sub     r2,pc,#8
        sub     r2,r2,#32       @ &rem_4bit
        b       .Lrem_4bit_got
        nop
.size   rem_4bit_get,.-rem_4bit_get
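
@ void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
@                     const u8 *inp, size_t len);
@
@ Assumed AAPCS arguments (matching the generic OpenSSL gcm128 code):
@   r0 = Xi (128-bit hash value, big-endian byte order)
@   r1 = Htable (16-entry table of multiples of H)
@   r2 = inp, r3 = len (a multiple of 16 bytes)
@ Each 16-byte block is XORed into Xi and the result is multiplied by H
@ one nibble at a time; rem_4bit is copied to the stack so the inner
@ loop can index it relative to sp.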
.global gcm_ghash_4bit
.type   gcm_ghash_4bit,%function
gcm_ghash_4bit:
        sub     r12,pc,#8
        add     r3,r2,r3                @ r3 to point at the end
        stmdb   sp!,{r3-r11,lr}         @ save r3/end too
        sub     r12,r12,#48             @ &rem_4bit

        ldmia   r12,{r4-r11}            @ copy rem_4bit ...
        stmdb   sp!,{r4-r11}            @ ... to stack

        ldrb    r12,[r2,#15]
        ldrb    r14,[r0,#15]
.Louter:
        eor     r12,r12,r14
        and     r14,r12,#0xf0
        and     r12,r12,#0x0f
        mov     r3,#14

        add     r7,r1,r12,lsl#4
        ldmia   r7,{r4-r7}      @ load Htbl[nlo]
        add     r11,r1,r14
        ldrb    r12,[r2,#14]

        and     r14,r4,#0xf             @ rem
        ldmia   r11,{r8-r11}    @ load Htbl[nhi]
        add     r14,r14,r14
        eor     r4,r8,r4,lsr#4
        ldrh    r8,[sp,r14]             @ rem_4bit[rem]
        eor     r4,r4,r5,lsl#28
        ldrb    r14,[r0,#14]
        eor     r5,r9,r5,lsr#4
        eor     r5,r5,r6,lsl#28
        eor     r6,r10,r6,lsr#4
        eor     r6,r6,r7,lsl#28
        eor     r7,r11,r7,lsr#4
        eor     r12,r12,r14
        and     r14,r12,#0xf0
        and     r12,r12,#0x0f
        eor     r7,r7,r8,lsl#16

.Linner:
        add     r11,r1,r12,lsl#4
        and     r12,r4,#0xf             @ rem
        subs    r3,r3,#1
        add     r12,r12,r12
        ldmia   r11,{r8-r11}    @ load Htbl[nlo]
        eor     r4,r8,r4,lsr#4
        eor     r4,r4,r5,lsl#28
        eor     r5,r9,r5,lsr#4
        eor     r5,r5,r6,lsl#28
        ldrh    r8,[sp,r12]             @ rem_4bit[rem]
        eor     r6,r10,r6,lsr#4
        ldrbpl  r12,[r2,r3]
        eor     r6,r6,r7,lsl#28
        eor     r7,r11,r7,lsr#4

        add     r11,r1,r14
        and     r14,r4,#0xf             @ rem
        eor     r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
        add     r14,r14,r14
        ldmia   r11,{r8-r11}    @ load Htbl[nhi]
        eor     r4,r8,r4,lsr#4
        ldrbpl  r8,[r0,r3]
        eor     r4,r4,r5,lsl#28
        eor     r5,r9,r5,lsr#4
        ldrh    r9,[sp,r14]
        eor     r5,r5,r6,lsl#28
        eor     r6,r10,r6,lsr#4
        eor     r6,r6,r7,lsl#28
        eorpl   r12,r12,r8
        eor     r7,r11,r7,lsr#4
        andpl   r14,r12,#0xf0
        andpl   r12,r12,#0x0f
        eor     r7,r7,r9,lsl#16 @ ^= rem_4bit[rem]
        bpl     .Linner

        ldr     r3,[sp,#32]             @ re-load r3/end
        add     r2,r2,#16
        mov     r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
        rev     r4,r4
        str     r4,[r0,#12]
#elif defined(__ARMEB__)
        str     r4,[r0,#12]
#else
        mov     r9,r4,lsr#8
        strb    r4,[r0,#12+3]
        mov     r10,r4,lsr#16
        strb    r9,[r0,#12+2]
        mov     r11,r4,lsr#24
        strb    r10,[r0,#12+1]
        strb    r11,[r0,#12]
#endif
        cmp     r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
        rev     r5,r5
        str     r5,[r0,#8]
#elif defined(__ARMEB__)
        str     r5,[r0,#8]
#else
        mov     r9,r5,lsr#8
        strb    r5,[r0,#8+3]
        mov     r10,r5,lsr#16
        strb    r9,[r0,#8+2]
        mov     r11,r5,lsr#24
        strb    r10,[r0,#8+1]
        strb    r11,[r0,#8]
#endif
        ldrbne  r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
        rev     r6,r6
        str     r6,[r0,#4]
#elif defined(__ARMEB__)
        str     r6,[r0,#4]
#else
        mov     r9,r6,lsr#8
        strb    r6,[r0,#4+3]
        mov     r10,r6,lsr#16
        strb    r9,[r0,#4+2]
        mov     r11,r6,lsr#24
        strb    r10,[r0,#4+1]
        strb    r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
        rev     r7,r7
        str     r7,[r0,#0]
#elif defined(__ARMEB__)
        str     r7,[r0,#0]
#else
        mov     r9,r7,lsr#8
        strb    r7,[r0,#0+3]
        mov     r10,r7,lsr#16
        strb    r9,[r0,#0+2]
        mov     r11,r7,lsr#24
        strb    r10,[r0,#0+1]
        strb    r11,[r0,#0]
#endif

        bne     .Louter

        add     sp,sp,#36
#if __ARM_ARCH__>=5
        ldmia   sp!,{r4-r11,pc}
#else
        ldmia   sp!,{r4-r11,lr}
        tst     lr,#1
        moveq   pc,lr                   @ be binary compatible with V4, yet
        .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
#endif
.size   gcm_ghash_4bit,.-gcm_ghash_4bit
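
@ void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
@
@ Assumed arguments: r0 = Xi, r1 = Htable.  Multiplies Xi by H in
@ GF(2^128) using the same 4-bit table algorithm as gcm_ghash_4bit,
@ with rem_4bit addressed through r2 (set up by rem_4bit_get).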
.global gcm_gmult_4bit
.type   gcm_gmult_4bit,%function
gcm_gmult_4bit:
        stmdb   sp!,{r4-r11,lr}
        ldrb    r12,[r0,#15]
        b       rem_4bit_get
.Lrem_4bit_got:
        and     r14,r12,#0xf0
        and     r12,r12,#0x0f
        mov     r3,#14

        add     r7,r1,r12,lsl#4
        ldmia   r7,{r4-r7}      @ load Htbl[nlo]
        ldrb    r12,[r0,#14]

        add     r11,r1,r14
        and     r14,r4,#0xf             @ rem
        ldmia   r11,{r8-r11}    @ load Htbl[nhi]
        add     r14,r14,r14
        eor     r4,r8,r4,lsr#4
        ldrh    r8,[r2,r14]     @ rem_4bit[rem]
        eor     r4,r4,r5,lsl#28
        eor     r5,r9,r5,lsr#4
        eor     r5,r5,r6,lsl#28
        eor     r6,r10,r6,lsr#4
        eor     r6,r6,r7,lsl#28
        eor     r7,r11,r7,lsr#4
        and     r14,r12,#0xf0
        eor     r7,r7,r8,lsl#16
        and     r12,r12,#0x0f

.Loop:
        add     r11,r1,r12,lsl#4
        and     r12,r4,#0xf             @ rem
        subs    r3,r3,#1
        add     r12,r12,r12
        ldmia   r11,{r8-r11}    @ load Htbl[nlo]
        eor     r4,r8,r4,lsr#4
        eor     r4,r4,r5,lsl#28
        eor     r5,r9,r5,lsr#4
        eor     r5,r5,r6,lsl#28
        ldrh    r8,[r2,r12]     @ rem_4bit[rem]
        eor     r6,r10,r6,lsr#4
        ldrbpl  r12,[r0,r3]
        eor     r6,r6,r7,lsl#28
        eor     r7,r11,r7,lsr#4

        add     r11,r1,r14
        and     r14,r4,#0xf             @ rem
        eor     r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
        add     r14,r14,r14
        ldmia   r11,{r8-r11}    @ load Htbl[nhi]
        eor     r4,r8,r4,lsr#4
        eor     r4,r4,r5,lsl#28
        eor     r5,r9,r5,lsr#4
        ldrh    r8,[r2,r14]     @ rem_4bit[rem]
        eor     r5,r5,r6,lsl#28
        eor     r6,r10,r6,lsr#4
        eor     r6,r6,r7,lsl#28
        eor     r7,r11,r7,lsr#4
        andpl   r14,r12,#0xf0
        andpl   r12,r12,#0x0f
        eor     r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
        bpl     .Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
        rev     r4,r4
        str     r4,[r0,#12]
#elif defined(__ARMEB__)
        str     r4,[r0,#12]
#else
        mov     r9,r4,lsr#8
        strb    r4,[r0,#12+3]
        mov     r10,r4,lsr#16
        strb    r9,[r0,#12+2]
        mov     r11,r4,lsr#24
        strb    r10,[r0,#12+1]
        strb    r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
        rev     r5,r5
        str     r5,[r0,#8]
#elif defined(__ARMEB__)
        str     r5,[r0,#8]
#else
        mov     r9,r5,lsr#8
        strb    r5,[r0,#8+3]
        mov     r10,r5,lsr#16
        strb    r9,[r0,#8+2]
        mov     r11,r5,lsr#24
        strb    r10,[r0,#8+1]
        strb    r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
        rev     r6,r6
        str     r6,[r0,#4]
#elif defined(__ARMEB__)
        str     r6,[r0,#4]
#else
        mov     r9,r6,lsr#8
        strb    r6,[r0,#4+3]
        mov     r10,r6,lsr#16
        strb    r9,[r0,#4+2]
        mov     r11,r6,lsr#24
        strb    r10,[r0,#4+1]
        strb    r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
        rev     r7,r7
        str     r7,[r0,#0]
#elif defined(__ARMEB__)
        str     r7,[r0,#0]
#else
        mov     r9,r7,lsr#8
        strb    r7,[r0,#0+3]
        mov     r10,r7,lsr#16
        strb    r9,[r0,#0+2]
        mov     r11,r7,lsr#24
        strb    r10,[r0,#0+1]
        strb    r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
        ldmia   sp!,{r4-r11,pc}
#else
        ldmia   sp!,{r4-r11,lr}
        tst     lr,#1
        moveq   pc,lr                   @ be binary compatible with V4, yet
        .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
#endif
.size   gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_MAX_ARCH__>=7
.arch   armv7-a
.fpu    neon
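
@ gcm_init_neon: assumed arguments r0 = Htable, r1 = H (as produced by
@ the generic gcm128 setup).  Computes the "twisted" H used by the NEON
@ routines (H shifted left by one bit, with the 0xc2...01 polynomial
@ constant folded in when the carried-out bit is set) and stores the
@ 16-byte result at r0.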
.global gcm_init_neon
.type   gcm_init_neon,%function
.align  4
gcm_init_neon:
        vld1.64         d7,[r1,:64]!    @ load H
        vmov.i8         q8,#0xe1
        vld1.64         d6,[r1,:64]
        vshl.i64        d17,#57
        vshr.u64        d16,#63         @ t0=0xc2....01
        vdup.8          q9,d7[7]
        vshr.u64        d26,d6,#63
        vshr.s8         q9,#7                   @ broadcast carry bit
        vshl.i64        q3,q3,#1
        vand            q8,q8,q9
        vorr            d7,d26          @ H<<<=1
        veor            q3,q3,q8                @ twisted H
        vstmia          r0,{q3}

        RET                                     @ bx lr
.size   gcm_init_neon,.-gcm_init_neon
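
@ gcm_gmult_neon: assumed arguments r0 = Xi, r1 = Htable holding the
@ twisted H from gcm_init_neon.  Loads Xi (byte-reversed on little-
@ endian), sets up the byte masks in d29-d31 and the Karatsuba factor
@ in d28, then runs a single 16-byte pass of the shared .Lgmult_neon
@ code (r3 = 16, so the loop exits after one iteration).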
.global gcm_gmult_neon
.type   gcm_gmult_neon,%function
.align  4
gcm_gmult_neon:
        vld1.64         d7,[r0,:64]!    @ load Xi
        vld1.64         d6,[r0,:64]!
        vmov.i64        d29,#0x0000ffffffffffff
        vldmia          r1,{d26-d27}    @ load twisted H
        vmov.i64        d30,#0x00000000ffffffff
#ifdef __ARMEL__
        vrev64.8        q3,q3
#endif
        vmov.i64        d31,#0x000000000000ffff
        veor            d28,d26,d27             @ Karatsuba pre-processing
        mov             r3,#16
        b               .Lgmult_neon
.size   gcm_gmult_neon,.-gcm_gmult_neon
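
@ gcm_ghash_neon: assumed arguments r0 = Xi, r1 = Htable (twisted H),
@ r2 = inp, r3 = len (a multiple of 16 bytes).  Each pass of .Loop_neon
@ XORs one 16-byte input block into Xi and multiplies the result by H
@ using the vmull.p8 code at .Lgmult_neon below.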
.global gcm_ghash_neon
.type   gcm_ghash_neon,%function
.align  4
gcm_ghash_neon:
        vld1.64         d1,[r0,:64]!    @ load Xi
        vld1.64         d0,[r0,:64]!
        vmov.i64        d29,#0x0000ffffffffffff
        vldmia          r1,{d26-d27}    @ load twisted H
        vmov.i64        d30,#0x00000000ffffffff
#ifdef __ARMEL__
        vrev64.8        q0,q0
#endif
        vmov.i64        d31,#0x000000000000ffff
        veor            d28,d26,d27             @ Karatsuba pre-processing

.Loop_neon:
        vld1.64         d7,[r2]!                @ load inp
        vld1.64         d6,[r2]!
#ifdef __ARMEL__
        vrev64.8        q3,q3
#endif
        veor            q3,q0                   @ inp^=Xi
.Lgmult_neon:
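        @ Each of the three blocks below builds a 64x64-bit carry-less
        @ multiply out of vmull.p8 (eight 8x8-bit polynomial multiplies):
        @ byte-rotated copies of the operands (A1..A3, B1..B4) give the
        @ cross products, the wrap-around bytes are masked off with
        @ d29-d31, and the pieces are shifted into place around the
        @ aligned product D = A*B.  Blocks one and three multiply the low
        @ and high halves of Xi by the twisted H; block two is the
        @ Karatsuba middle term, so the three results combine into the
        @ 256-bit product Xh|Xl.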
        vext.8          d16, d26, d26, #1       @ A1
        vmull.p8        q8, d16, d6             @ F = A1*B
        vext.8          d0, d6, d6, #1  @ B1
        vmull.p8        q0, d26, d0             @ E = A*B1
        vext.8          d18, d26, d26, #2       @ A2
        vmull.p8        q9, d18, d6             @ H = A2*B
        vext.8          d22, d6, d6, #2 @ B2
        vmull.p8        q11, d26, d22           @ G = A*B2
        vext.8          d20, d26, d26, #3       @ A3
        veor            q8, q8, q0              @ L = E + F
        vmull.p8        q10, d20, d6            @ J = A3*B
        vext.8          d0, d6, d6, #3  @ B3
        veor            q9, q9, q11             @ M = G + H
        vmull.p8        q0, d26, d0             @ I = A*B3
        veor            d16, d16, d17   @ t0 = (L) (P0 + P1) << 8
        vand            d17, d17, d29
        vext.8          d22, d6, d6, #4 @ B4
        veor            d18, d18, d19   @ t1 = (M) (P2 + P3) << 16
        vand            d19, d19, d30
        vmull.p8        q11, d26, d22           @ K = A*B4
        veor            q10, q10, q0            @ N = I + J
        veor            d16, d16, d17
        veor            d18, d18, d19
        veor            d20, d20, d21   @ t2 = (N) (P4 + P5) << 24
        vand            d21, d21, d31
        vext.8          q8, q8, q8, #15
        veor            d22, d22, d23   @ t3 = (K) (P6 + P7) << 32
        vmov.i64        d23, #0
        vext.8          q9, q9, q9, #14
        veor            d20, d20, d21
        vmull.p8        q0, d26, d6             @ D = A*B
        vext.8          q11, q11, q11, #12
        vext.8          q10, q10, q10, #13
        veor            q8, q8, q9
        veor            q10, q10, q11
        veor            q0, q0, q8
        veor            q0, q0, q10
        veor            d6,d6,d7        @ Karatsuba pre-processing
        vext.8          d16, d28, d28, #1       @ A1
        vmull.p8        q8, d16, d6             @ F = A1*B
        vext.8          d2, d6, d6, #1  @ B1
        vmull.p8        q1, d28, d2             @ E = A*B1
        vext.8          d18, d28, d28, #2       @ A2
        vmull.p8        q9, d18, d6             @ H = A2*B
        vext.8          d22, d6, d6, #2 @ B2
        vmull.p8        q11, d28, d22           @ G = A*B2
        vext.8          d20, d28, d28, #3       @ A3
        veor            q8, q8, q1              @ L = E + F
        vmull.p8        q10, d20, d6            @ J = A3*B
        vext.8          d2, d6, d6, #3  @ B3
        veor            q9, q9, q11             @ M = G + H
        vmull.p8        q1, d28, d2             @ I = A*B3
        veor            d16, d16, d17   @ t0 = (L) (P0 + P1) << 8
        vand            d17, d17, d29
        vext.8          d22, d6, d6, #4 @ B4
        veor            d18, d18, d19   @ t1 = (M) (P2 + P3) << 16
        vand            d19, d19, d30
        vmull.p8        q11, d28, d22           @ K = A*B4
        veor            q10, q10, q1            @ N = I + J
        veor            d16, d16, d17
        veor            d18, d18, d19
        veor            d20, d20, d21   @ t2 = (N) (P4 + P5) << 24
        vand            d21, d21, d31
        vext.8          q8, q8, q8, #15
        veor            d22, d22, d23   @ t3 = (K) (P6 + P7) << 32
        vmov.i64        d23, #0
        vext.8          q9, q9, q9, #14
        veor            d20, d20, d21
        vmull.p8        q1, d28, d6             @ D = A*B
        vext.8          q11, q11, q11, #12
        vext.8          q10, q10, q10, #13
        veor            q8, q8, q9
        veor            q10, q10, q11
        veor            q1, q1, q8
        veor            q1, q1, q10
        vext.8          d16, d27, d27, #1       @ A1
        vmull.p8        q8, d16, d7             @ F = A1*B
        vext.8          d4, d7, d7, #1  @ B1
        vmull.p8        q2, d27, d4             @ E = A*B1
        vext.8          d18, d27, d27, #2       @ A2
        vmull.p8        q9, d18, d7             @ H = A2*B
        vext.8          d22, d7, d7, #2 @ B2
        vmull.p8        q11, d27, d22           @ G = A*B2
        vext.8          d20, d27, d27, #3       @ A3
        veor            q8, q8, q2              @ L = E + F
        vmull.p8        q10, d20, d7            @ J = A3*B
        vext.8          d4, d7, d7, #3  @ B3
        veor            q9, q9, q11             @ M = G + H
        vmull.p8        q2, d27, d4             @ I = A*B3
        veor            d16, d16, d17   @ t0 = (L) (P0 + P1) << 8
        vand            d17, d17, d29
        vext.8          d22, d7, d7, #4 @ B4
        veor            d18, d18, d19   @ t1 = (M) (P2 + P3) << 16
        vand            d19, d19, d30
        vmull.p8        q11, d27, d22           @ K = A*B4
        veor            q10, q10, q2            @ N = I + J
        veor            d16, d16, d17
        veor            d18, d18, d19
        veor            d20, d20, d21   @ t2 = (N) (P4 + P5) << 24
        vand            d21, d21, d31
        vext.8          q8, q8, q8, #15
        veor            d22, d22, d23   @ t3 = (K) (P6 + P7) << 32
        vmov.i64        d23, #0
        vext.8          q9, q9, q9, #14
        veor            d20, d20, d21
        vmull.p8        q2, d27, d7             @ D = A*B
        vext.8          q11, q11, q11, #12
        vext.8          q10, q10, q10, #13
        veor            q8, q8, q9
        veor            q10, q10, q11
        veor            q2, q2, q8
        veor            q2, q2, q10
        veor            q1,q1,q0                @ Karatsuba post-processing
        veor            q1,q1,q2
        veor            d1,d1,d2
        veor            d4,d4,d3        @ Xh|Xl - 256-bit result
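
        @ Reduce the 256-bit product Xh|Xl modulo the GHASH polynomial
        @ x^128 + x^7 + x^2 + x + 1: the first phase folds q0 shifted
        @ left by 57, 62 and 63 bits into the upper words, and the
        @ second phase folds q0 and its right shifts by 1, 2 and 7 back
        @ in, mirroring reduction_avx from ghash-x86_64.pl (see the
        @ original comment below).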
        @ equivalent of reduction_avx from ghash-x86_64.pl
        vshl.i64        q9,q0,#57               @ 1st phase
        vshl.i64        q10,q0,#62
        veor            q10,q10,q9              @
        vshl.i64        q9,q0,#63
        veor            q10, q10, q9            @
        veor            d1,d1,d20       @
        veor            d4,d4,d21

        vshr.u64        q10,q0,#1               @ 2nd phase
        veor            q2,q2,q0
        veor            q0,q0,q10               @
        vshr.u64        q10,q10,#6
        vshr.u64        q0,q0,#1                @
        veor            q0,q0,q2                @
        veor            q0,q0,q10               @

        subs            r3,#16
        bne             .Loop_neon

#ifdef __ARMEL__
        vrev64.8        q0,q0
#endif
        sub             r0,#16
        vst1.64         d1,[r0,:64]!    @ write out Xi
        vst1.64         d0,[r0,:64]

        RET                                     @ bx lr
.size   gcm_ghash_neon,.-gcm_ghash_neon
#endif
.asciz  "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align  2