.type	gcm_init_v8,%function
	vld1.64	{q9},[r1]		@ load H
	vext.8	q8,q10,q8,#8		@ t0=0xc2....01
	vshr.s32	q9,q9,#31	@ broadcast carry bit
	vorr	q3,q3,q11		@ H<<<=1
	veor	q3,q3,q8		@ twisted H
.size	gcm_init_v8,.-gcm_init_v8
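
@ Note: gcm_init_v8 stores H in "twisted" form so the multiply
@ routines can work on bit-reflected data without per-block
@ reflection; a sketch of the math, assuming the standard trick:
@	twisted H = (H <<< 1) ^ (carry ? 0xc2....01 : 0)
@ where 0xc2....01 (t0 above) is the bit-reflected reduction
@ polynomial of x^128+x^7+x^2+x+1, pre-shifted by one bit.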
.type	gcm_gmult_v8,%function
	vld1.64	{q9},[r0]		@ load Xi
	vld1.64	{q12},[r1]		@ load twisted H
	veor	q13,q13,q12		@ Karatsuba pre-processing
.size	gcm_gmult_v8,.-gcm_gmult_v8
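
@ Note: the "Karatsuba pre-processing" presumably leaves q13 holding
@ H.lo^H.hi in both 64-bit halves (the vext.8 feeding it is elided
@ here), so the middle product (H.lo+H.hi)·(Xi.lo+Xi.hi) costs a
@ single pmull per block in the main loop.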
.type	gcm_ghash_v8,%function
	vld1.64	{q0},[r0]		@ load [rotated] Xi
	vld1.64	{q12},[r1]		@ load twisted H
	vld1.64	{q9},[r2],r12		@ load [rotated] inp
	veor	q13,q13,q12		@ Karatsuba pre-processing
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q9,q10		@ q9 is rotated inp^Xi
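
@ Note: this is the GHASH recurrence, absorbing one input block
@ into the accumulator before the multiplication by twisted H:
@	Xi = (Xi ^ inp[i]) · H   in GF(2^128)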
	.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3	@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
	.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3	@ H.hi·Xi.hi
	.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9	@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
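
@ Note: pmull/pmull2 are hand-encoded as .byte so the file assembles
@ on toolchains without crypto-extension support. The three products
@ implement one Karatsuba multiplication:
@	Xl = H.lo·Xi.lo,  Xh = H.hi·Xi.hi,
@	Xm = (H.lo+H.hi)·(Xi.lo+Xi.hi) ^ Xl ^ Xh
@ yielding the 256-bit product Xh:Xm:Xl with three pmulls, not four.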
	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	vld1.64	{q9},[r2],r12		@ load [rotated] inp
	.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11	@ 1st phase
	vmov	d4,d3			@ Xh|Xm - 256-bit result
	vmov	d3,d0			@ Xm is rotated Xl
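
@ Note: the two vmov instructions repack the d-registers so the
@ 256-bit product lines up as Xh|Xm for the fold below; the load of
@ the next input block is interleaved above, apparently to hide
@ memory latency.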
	vext.8	q10,q0,q0,#8		@ 2nd phase
	.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
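
@ Note: reduction modulo x^128+x^7+x^2+x+1 is performed in two
@ phases, assuming q11 was set to the pre-shifted constant
@ (0xe1<<57, i.e. 0xc2 in the top byte of each lane) in setup code
@ elided here: each phase multiplies one 64-bit half by the constant
@ and folds it back, shrinking the 256-bit product to a 128-bit Xi
@ without any division.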
	vst1.64	{q0},[r0]		@ write out Xi
.size	gcm_ghash_v8,.-gcm_ghash_v8
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
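
@ Assumed C-level interface (conventional GHASH prototypes, not taken
@ from this file):
@	void gcm_init_v8(u128 Htable[16], const u64 H[2]);
@	void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
@	void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16],
@	                  const u8 *inp, size_t len);
@ Arguments arrive in r0..r3 per the AAPCS; r12 is the post-increment
@ stride applied to the input pointer r2 in the loads above.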