// arch/arm64/crypto/crct10dif-ce-core.S
//
// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//

// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
//     Erdinc Ozturk <erdinc.ozturk@intel.com>
//     Vinodh Gopal <vinodh.gopal@intel.com>
//     James Guilford <james.guilford@intel.com>
//     Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses.  You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the
//   distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Reference paper titled "Fast CRC Computation for Generic Polynomials
// Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
//      /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//
#include <linux/linkage.h>
#include <asm/assembler.h>

        .text
        .arch           armv8-a+crypto

        init_crc        .req    w19
        buf             .req    x20
        len             .req    x21
        fold_consts_ptr .req    x22

        fold_consts     .req    v10

        ad              .req    v14

        k00_16          .req    v15
        k32_48          .req    v16

        t3              .req    v17
        t4              .req    v18
        t5              .req    v19
        t6              .req    v20
        t7              .req    v21
        t8              .req    v22
        t9              .req    v23

        perm1           .req    v24
        perm2           .req    v25
        perm3           .req    v26
        perm4           .req    v27

        bd1             .req    v28
        bd2             .req    v29
        bd3             .req    v30
        bd4             .req    v31
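
// Two flavours of each pmull helper are provided.  The 'p64' flavour uses
// the 64x64 -> 128 bit PMULL instruction from the ARMv8 Crypto Extensions;
// the 'p8' flavour synthesizes the same carry-less product out of the
// baseline AdvSIMD 8x8 -> 16 bit PMULL, for CPUs that implement NEON but
// not the Crypto Extensions.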
        .macro          __pmull_init_p64
        .endm

        .macro          __pmull_pre_p64, bd
        .endm
        .macro          __pmull_init_p8
        // k00_16 := 0x0000000000000000_000000000000ffff
        // k32_48 := 0x00000000ffffffff_0000ffffffffffff
        movi            k32_48.2d, #0xffffffff
        mov             k32_48.h[2], k32_48.h[0]
        ushr            k00_16.2d, k32_48.2d, #32

        // prepare the permutation vectors
        mov_q           x5, 0x080f0e0d0c0b0a09
        movi            perm4.8b, #8
        dup             perm1.2d, x5
        eor             perm1.16b, perm1.16b, perm4.16b
        ushr            perm2.2d, perm1.2d, #8
        ushr            perm3.2d, perm1.2d, #16
        ushr            perm4.2d, perm1.2d, #24
        sli             perm2.2d, perm1.2d, #56
        sli             perm3.2d, perm1.2d, #48
        sli             perm4.2d, perm1.2d, #40
        .endm
        .macro          __pmull_pre_p8, bd
        tbl             bd1.16b, {\bd\().16b}, perm1.16b
        tbl             bd2.16b, {\bd\().16b}, perm2.16b
        tbl             bd3.16b, {\bd\().16b}, perm3.16b
        tbl             bd4.16b, {\bd\().16b}, perm4.16b
        .endm
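
// Carry-less multiply core for the p8 fallback: a 64x64 -> 128 bit product
// is assembled from 8x8 -> 16 bit PMULLs of the multiplicand 'ad' against
// byte-rotated copies of the multiplier (B1-B4, precomputed by
// __pmull_pre_p8), and of byte-rotated copies of 'ad' (A1-A3) against the
// multiplier.  The aggregated partial products L, M, N, K are then masked
// and byte-shifted into their proper bit positions before the caller
// accumulates them into the final result.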
SYM_FUNC_START_LOCAL(__pmull_p8_core)
.L__pmull_p8_core:
        ext             t4.8b, ad.8b, ad.8b, #1                 // A1
        ext             t5.8b, ad.8b, ad.8b, #2                 // A2
        ext             t6.8b, ad.8b, ad.8b, #3                 // A3

        pmull           t4.8h, t4.8b, fold_consts.8b            // F = A1*B
        pmull           t8.8h, ad.8b, bd1.8b                    // E = A*B1
        pmull           t5.8h, t5.8b, fold_consts.8b            // H = A2*B
        pmull           t7.8h, ad.8b, bd2.8b                    // G = A*B2
        pmull           t6.8h, t6.8b, fold_consts.8b            // J = A3*B
        pmull           t9.8h, ad.8b, bd3.8b                    // I = A*B3
        pmull           t3.8h, ad.8b, bd4.8b                    // K = A*B4
        b               0f

.L__pmull_p8_core2:
        tbl             t4.16b, {ad.16b}, perm1.16b             // A1
        tbl             t5.16b, {ad.16b}, perm2.16b             // A2
        tbl             t6.16b, {ad.16b}, perm3.16b             // A3

        pmull2          t4.8h, t4.16b, fold_consts.16b          // F = A1*B
        pmull2          t8.8h, ad.16b, bd1.16b                  // E = A*B1
        pmull2          t5.8h, t5.16b, fold_consts.16b          // H = A2*B
        pmull2          t7.8h, ad.16b, bd2.16b                  // G = A*B2
        pmull2          t6.8h, t6.16b, fold_consts.16b          // J = A3*B
        pmull2          t9.8h, ad.16b, bd3.16b                  // I = A*B3
        pmull2          t3.8h, ad.16b, bd4.16b                  // K = A*B4

0:      eor             t4.16b, t4.16b, t8.16b                  // L = E + F
        eor             t5.16b, t5.16b, t7.16b                  // M = G + H
        eor             t6.16b, t6.16b, t9.16b                  // N = I + J

        uzp1            t8.2d, t4.2d, t5.2d
        uzp2            t4.2d, t4.2d, t5.2d
        uzp1            t7.2d, t6.2d, t3.2d
        uzp2            t6.2d, t6.2d, t3.2d

        // t4 = (L) (P0 + P1) << 8
        // t5 = (M) (P2 + P3) << 16
        eor             t8.16b, t8.16b, t4.16b
        and             t4.16b, t4.16b, k32_48.16b

        // t6 = (N) (P4 + P5) << 24
        // t7 = (K) (P6 + P7) << 32
        eor             t7.16b, t7.16b, t6.16b
        and             t6.16b, t6.16b, k00_16.16b

        eor             t8.16b, t8.16b, t4.16b
        eor             t7.16b, t7.16b, t6.16b

        zip2            t5.2d, t8.2d, t4.2d
        zip1            t4.2d, t8.2d, t4.2d
        zip2            t3.2d, t7.2d, t6.2d
        zip1            t6.2d, t7.2d, t6.2d
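
        // Byte-rotate each recombined partial product into position:
        // rotating left by 1, 2, 3 and 4 bytes implements the << 8 ... << 32
        // shifts noted in the comments above.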
        ext             t4.16b, t4.16b, t4.16b, #15
        ext             t5.16b, t5.16b, t5.16b, #14
        ext             t6.16b, t6.16b, t6.16b, #13
        ext             t3.16b, t3.16b, t3.16b, #12

        eor             t4.16b, t4.16b, t5.16b
        eor             t6.16b, t6.16b, t3.16b
        ret
SYM_FUNC_END(__pmull_p8_core)
        .macro          __pmull_p8, rq, ad, bd, i
        .ifnc           \bd, fold_consts
        .err
        .endif
        mov             ad.16b, \ad\().16b
        .ifb            \i
        pmull           \rq\().8h, \ad\().8b, \bd\().8b         // D = A*B
        .else
        pmull2          \rq\().8h, \ad\().16b, \bd\().16b       // D = A*B
        .endif

        bl              .L__pmull_p8_core\i

        eor             \rq\().16b, \rq\().16b, t4.16b
        eor             \rq\().16b, \rq\().16b, t6.16b
        .endm

        // Fold reg1, reg2 into the next 32 data bytes, storing the result back
        // into reg1, reg2.
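        //
        // Folding relies on the congruence
        //   (hi64(r) * x^64 + lo64(r)) * x^N
        //     == hi64(r) * (x^(N+64) mod G(x)) + lo64(r) * (x^N mod G(x))  (mod G(x))
        // so each vector, which sits N bits ahead of the new data in the
        // message, is replaced by two carry-less multiplies against the
        // precomputed constants in fold_consts, XORed with the new data.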
        .macro          fold_32_bytes, p, reg1, reg2
        ldp             q11, q12, [buf], #0x20

        __pmull_\p      v8, \reg1, fold_consts, 2
        __pmull_\p      \reg1, \reg1, fold_consts

CPU_LE( rev64           v11.16b, v11.16b                )
CPU_LE( rev64           v12.16b, v12.16b                )

        __pmull_\p      v9, \reg2, fold_consts, 2
        __pmull_\p      \reg2, \reg2, fold_consts

CPU_LE( ext             v11.16b, v11.16b, v11.16b, #8   )
CPU_LE( ext             v12.16b, v12.16b, v12.16b, #8   )

        eor             \reg1\().16b, \reg1\().16b, v8.16b
        eor             \reg2\().16b, \reg2\().16b, v9.16b
        eor             \reg1\().16b, \reg1\().16b, v11.16b
        eor             \reg2\().16b, \reg2\().16b, v12.16b
        .endm

        // Fold src_reg into dst_reg, optionally loading the next fold constants
        .macro          fold_16_bytes, p, src_reg, dst_reg, load_next_consts
        __pmull_\p      v8, \src_reg, fold_consts
        __pmull_\p      \src_reg, \src_reg, fold_consts, 2
        .ifnb           \load_next_consts
        ld1             {fold_consts.2d}, [fold_consts_ptr], #16
        __pmull_pre_\p  fold_consts
        .endif
        eor             \dst_reg\().16b, \dst_reg\().16b, v8.16b
        eor             \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
        .endm
        .macro          __pmull_p64, rd, rn, rm, n
        .ifb            \n
        pmull           \rd\().1q, \rn\().1d, \rm\().1d
        .else
        pmull2          \rd\().1q, \rn\().2d, \rm\().2d
        .endif
        .endm
        .macro          crc_t10dif_pmull, p
        frame_push      4, 128

        mov             init_crc, w0
        mov             buf, x1
        mov             len, x2

        __pmull_init_\p

        // For sizes less than 256 bytes, we can't fold 128 bytes at a time.
        cmp             len, #256
        b.lt            .Lless_than_256_bytes_\@

        adr_l           fold_consts_ptr, .Lfold_across_128_bytes_consts

        // Load the first 128 data bytes.  Byte swapping is necessary to make
        // the bit order match the polynomial coefficient order.
        ldp             q0, q1, [buf]
        ldp             q2, q3, [buf, #0x20]
        ldp             q4, q5, [buf, #0x40]
        ldp             q6, q7, [buf, #0x60]
        add             buf, buf, #0x80
CPU_LE( rev64           v0.16b, v0.16b                  )
CPU_LE( rev64           v1.16b, v1.16b                  )
CPU_LE( rev64           v2.16b, v2.16b                  )
CPU_LE( rev64           v3.16b, v3.16b                  )
CPU_LE( rev64           v4.16b, v4.16b                  )
CPU_LE( rev64           v5.16b, v5.16b                  )
CPU_LE( rev64           v6.16b, v6.16b                  )
CPU_LE( rev64           v7.16b, v7.16b                  )
CPU_LE( ext             v0.16b, v0.16b, v0.16b, #8      )
CPU_LE( ext             v1.16b, v1.16b, v1.16b, #8      )
CPU_LE( ext             v2.16b, v2.16b, v2.16b, #8      )
CPU_LE( ext             v3.16b, v3.16b, v3.16b, #8      )
CPU_LE( ext             v4.16b, v4.16b, v4.16b, #8      )
CPU_LE( ext             v5.16b, v5.16b, v5.16b, #8      )
CPU_LE( ext             v6.16b, v6.16b, v6.16b, #8      )
CPU_LE( ext             v7.16b, v7.16b, v7.16b, #8      )

        // XOR the first 16 data *bits* with the initial CRC value.
        movi            v8.16b, #0
        mov             v8.h[7], init_crc
        eor             v0.16b, v0.16b, v8.16b

        // Load the constants for folding across 128 bytes.
        ld1             {fold_consts.2d}, [fold_consts_ptr]
        __pmull_pre_\p  fold_consts

        // Subtract 128 for the 128 data bytes just consumed.  Subtract another
        // 128 to simplify the termination condition of the following loop.
        sub             len, len, #256

        // While >= 128 data bytes remain (not counting v0-v7), fold the 128
        // bytes v0-v7 into them, storing the result back into v0-v7.
.Lfold_128_bytes_loop_\@:
        fold_32_bytes   \p, v0, v1
        fold_32_bytes   \p, v2, v3
        fold_32_bytes   \p, v4, v5
        fold_32_bytes   \p, v6, v7

        subs            len, len, #128
        b.lt            .Lfold_128_bytes_loop_done_\@

        if_will_cond_yield_neon
        stp             q0, q1, [sp, #.Lframe_local_offset]
        stp             q2, q3, [sp, #.Lframe_local_offset + 32]
        stp             q4, q5, [sp, #.Lframe_local_offset + 64]
        stp             q6, q7, [sp, #.Lframe_local_offset + 96]
        do_cond_yield_neon
        ldp             q0, q1, [sp, #.Lframe_local_offset]
        ldp             q2, q3, [sp, #.Lframe_local_offset + 32]
        ldp             q4, q5, [sp, #.Lframe_local_offset + 64]
        ldp             q6, q7, [sp, #.Lframe_local_offset + 96]
        ld1             {fold_consts.2d}, [fold_consts_ptr]
        __pmull_init_\p
        __pmull_pre_\p  fold_consts
        endif_yield_neon

        b               .Lfold_128_bytes_loop_\@

.Lfold_128_bytes_loop_done_\@:

        // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.

        // Fold across 64 bytes.
        add             fold_consts_ptr, fold_consts_ptr, #16
        ld1             {fold_consts.2d}, [fold_consts_ptr], #16
        __pmull_pre_\p  fold_consts
        fold_16_bytes   \p, v0, v4
        fold_16_bytes   \p, v1, v5
        fold_16_bytes   \p, v2, v6
        fold_16_bytes   \p, v3, v7, 1
        // Fold across 32 bytes.
        fold_16_bytes   \p, v4, v6
        fold_16_bytes   \p, v5, v7, 1
        // Fold across 16 bytes.
        fold_16_bytes   \p, v6, v7

        // Add 128 to get the correct number of data bytes remaining in 0...127
        // (not counting v7), following the previous extra subtraction by 128.
        // Then subtract 16 to simplify the termination condition of the
        // following loop.
        adds            len, len, #(128-16)

        // While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7
        // into them, storing the result back into v7.
        b.lt            .Lfold_16_bytes_loop_done_\@
.Lfold_16_bytes_loop_\@:
        __pmull_\p      v8, v7, fold_consts
        __pmull_\p      v7, v7, fold_consts, 2
        eor             v7.16b, v7.16b, v8.16b
        ldr             q0, [buf], #16
CPU_LE( rev64           v0.16b, v0.16b                  )
CPU_LE( ext             v0.16b, v0.16b, v0.16b, #8      )
        eor             v7.16b, v7.16b, v0.16b
        subs            len, len, #16
        b.ge            .Lfold_16_bytes_loop_\@

.Lfold_16_bytes_loop_done_\@:
        // Add 16 to get the correct number of data bytes remaining in 0...15
        // (not counting v7), following the previous extra subtraction by 16.
        adds            len, len, #16
        b.eq            .Lreduce_final_16_bytes_\@

.Lhandle_partial_segment_\@:
        // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
        // 16 bytes are in v7 and the rest are the remaining data in 'buf'.  To
        // do this without needing a fold constant for each possible 'len',
        // redivide the bytes into a first chunk of 'len' bytes and a second
        // chunk of 16 bytes, then fold the first chunk into the second.
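        //
        // For example, with len = 4 the final 20 bytes are redivided into the
        // 4 oldest bytes (first chunk) and the remaining 16 bytes (second
        // chunk); the first chunk can then be folded into the second using
        // the same fold-across-16-bytes constants as the main loop.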

        // v0 = last 16 original data bytes
        add             buf, buf, len
        ldr             q0, [buf, #-16]
CPU_LE( rev64           v0.16b, v0.16b                  )
CPU_LE( ext             v0.16b, v0.16b, v0.16b, #8      )

        // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
        adr_l           x4, .Lbyteshift_table + 16
        sub             x4, x4, len
        ld1             {v2.16b}, [x4]
        tbl             v1.16b, {v7.16b}, v2.16b

        // v3 = first chunk: v7 right-shifted by '16-len' bytes.
        movi            v3.16b, #0x80
        eor             v2.16b, v2.16b, v3.16b
        tbl             v3.16b, {v7.16b}, v2.16b

        // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
        sshr            v2.16b, v2.16b, #7

        // v2 = second chunk: 'len' bytes from v0 (low-order bytes),
        // then '16-len' bytes from v1 (high-order bytes).
        bsl             v2.16b, v1.16b, v0.16b

        // Fold the first chunk into the second chunk, storing the result in v7.
        __pmull_\p      v0, v3, fold_consts
        __pmull_\p      v7, v3, fold_consts, 2
        eor             v7.16b, v7.16b, v0.16b
        eor             v7.16b, v7.16b, v2.16b

.Lreduce_final_16_bytes_\@:
        // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.

        movi            v2.16b, #0              // init zero register

        // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
        ld1             {fold_consts.2d}, [fold_consts_ptr], #16
        __pmull_pre_\p  fold_consts

        // Fold the high 64 bits into the low 64 bits, while also multiplying by
        // x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
        // whose low 48 bits are 0.
        ext             v0.16b, v2.16b, v7.16b, #8
        __pmull_\p      v7, v7, fold_consts, 2  // high bits * x^48 * (x^80 mod G(x))
        eor             v0.16b, v0.16b, v7.16b  // + low bits * x^64

        // Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
        // value congruent to x^64 * M(x) and whose low 48 bits are 0.
        ext             v1.16b, v0.16b, v2.16b, #12     // extract high 32 bits
        mov             v0.s[3], v2.s[0]        // zero high 32 bits
        __pmull_\p      v1, v1, fold_consts     // high 32 bits * x^48 * (x^48 mod G(x))
        eor             v0.16b, v0.16b, v1.16b  // + low bits

        // Load G(x) and floor(x^48 / G(x)).
        ld1             {fold_consts.2d}, [fold_consts_ptr]
        __pmull_pre_\p  fold_consts
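
        // Barrett reduction computes v0 mod G(x) without a division: the
        // quotient floor(v0 / G(x)) is approximated by multiplying the high
        // bits of v0 by the precomputed reciprocal floor(x^48 / G(x)) and
        // discarding the low bits of the product; XORing quotient * G(x)
        // back into v0 then leaves only the remainder.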
        // Use Barrett reduction to compute the final CRC value.
        __pmull_\p      v1, v0, fold_consts, 2  // high 32 bits * floor(x^48 / G(x))
        ushr            v1.2d, v1.2d, #32       // /= x^32
        __pmull_\p      v1, v1, fold_consts     // *= G(x)
        ushr            v0.2d, v0.2d, #48
        eor             v0.16b, v0.16b, v1.16b  // + low 16 nonzero bits
        // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.

        umov            w0, v0.h[0]
        frame_pop
        ret

.Lless_than_256_bytes_\@:
        // Checksumming a buffer of length 16...255 bytes

        adr_l           fold_consts_ptr, .Lfold_across_16_bytes_consts

        // Load the first 16 data bytes.
        ldr             q7, [buf], #0x10
CPU_LE( rev64           v7.16b, v7.16b                  )
CPU_LE( ext             v7.16b, v7.16b, v7.16b, #8      )

        // XOR the first 16 data *bits* with the initial CRC value.
        movi            v0.16b, #0
        mov             v0.h[7], init_crc
        eor             v7.16b, v7.16b, v0.16b

        // Load the fold-across-16-bytes constants.
        ld1             {fold_consts.2d}, [fold_consts_ptr], #16
        __pmull_pre_\p  fold_consts

        cmp             len, #16
        b.eq            .Lreduce_final_16_bytes_\@      // len == 16
        subs            len, len, #32
        b.ge            .Lfold_16_bytes_loop_\@         // 32 <= len <= 255
        add             len, len, #16
        b               .Lhandle_partial_segment_\@     // 17 <= len <= 31
        .endm

//
// u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p8)
        crc_t10dif_pmull        p8
SYM_FUNC_END(crc_t10dif_pmull_p8)

        .align          5
//
// u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p64)
        crc_t10dif_pmull        p64
SYM_FUNC_END(crc_t10dif_pmull_p64)

        .section        ".rodata", "a"
        .align          4

// Fold constants precomputed from the polynomial 0x18bb7
// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
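//
// The 16-bit remainders below can be reproduced by bit-serial reduction; as
// a minimal C sketch (the xn_mod_g() helper is illustrative only, not part
// of the kernel sources):
//
//	static u16 xn_mod_g(unsigned int n)
//	{
//		u32 r = 1;			/* r = x^0 */
//
//		while (n--) {
//			r <<= 1;		/* r *= x */
//			if (r & 0x10000)	/* degree reached 16 */
//				r ^= 0x18bb7;	/* subtract (XOR) G(x) */
//		}
//		return r;
//	}
//
// e.g. xn_mod_g(8 * 128) yields the first constant, x^(8*128) mod G(x).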
.Lfold_across_128_bytes_consts:
        .quad           0x0000000000006123      // x^(8*128)    mod G(x)
        .quad           0x0000000000002295      // x^(8*128+64) mod G(x)
// .Lfold_across_64_bytes_consts:
        .quad           0x0000000000001069      // x^(4*128)    mod G(x)
        .quad           0x000000000000dd31      // x^(4*128+64) mod G(x)
// .Lfold_across_32_bytes_consts:
        .quad           0x000000000000857d      // x^(2*128)    mod G(x)
        .quad           0x0000000000007acc      // x^(2*128+64) mod G(x)
.Lfold_across_16_bytes_consts:
        .quad           0x000000000000a010      // x^(1*128)    mod G(x)
        .quad           0x0000000000001faa      // x^(1*128+64) mod G(x)
// .Lfinal_fold_consts:
        .quad           0x1368000000000000      // x^48 * (x^48 mod G(x))
        .quad           0x2d56000000000000      // x^48 * (x^80 mod G(x))
// .Lbarrett_reduction_consts:
        .quad           0x0000000000018bb7      // G(x)
        .quad           0x00000001f65a57f8      // floor(x^48 / G(x))

// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
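// For example, for len = 3 the vector at &.Lbyteshift_table[13] is
//   { 0x8d, 0x8e, 0x8f, 0x00, 0x01, ..., 0x0c }
// (tbl writes zero for the out-of-range 0x8x indices, giving a left shift
// by 3 bytes), and the same vector XOR {0x80, ..., 0x80} is
//   { 0x0d, 0x0e, 0x0f, 0x80, ..., 0x8c }
// which selects bytes 13-15, i.e. a right shift by 13 bytes.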
.Lbyteshift_table:
        .byte            0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
        .byte           0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
        .byte            0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
        .byte            0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0