/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Using the hardware provided PCLMULQDQ instruction to accelerate CRC32
 * computation.
 * CRC32 polynomial: 0x04c11db7 (BE) / 0xEDB88320 (LE)
 * PCLMULQDQ is a new instruction in Intel SSE4.2; the reference can be
 * found at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:	Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *		Alexander Boyko <Alexander_Boyko@xyratex.com>
 */

#include <linux/linkage.h>
/*
 * [(x4*128+32 mod P(x) << 32)]' << 1	= 0x154442bd4
 * #define CONSTANT_R1	0x154442bd4LL
 *
 * [(x4*128-32 mod P(x) << 32)]' << 1	= 0x1c6e41596
 * #define CONSTANT_R2	0x1c6e41596LL
 */
.Lconstant_R2R1:
        .octa 0x00000001c6e415960000000154442bd4
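/*
 * R2:R1 are the fold constants for .Lloop_64 below: multiplying an
 * accumulator's low 64-bit half by R1 and its high half by R2 advances that
 * accumulator by 512 bits, i.e. past one full 64-byte iteration.
 */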
/*
 * [(x128+32 mod P(x) << 32)]' << 1	= 0x1751997d0
 * #define CONSTANT_R3	0x1751997d0LL
 *
 * [(x128-32 mod P(x) << 32)]' << 1	= 0x0ccaa009e
 * #define CONSTANT_R4	0x0ccaa009eLL
 */
.Lconstant_R4R3:
        .octa 0x00000000ccaa009e00000001751997d0
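/*
 * R4:R3 fold over a 128-bit (16-byte) distance; they are used to collapse
 * the four accumulators in .Lless_64 and to consume the remaining 16-byte
 * blocks in .Lloop_16.
 */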
/*
 * [(x64 mod P(x) << 32)]' << 1	= 0x163cd6124
 * #define CONSTANT_R5	0x163cd6124LL
 */
.Lconstant_R5:
        .octa 0x00000000000000000000000163cd6124
.Lconstant_mask32:
        .octa 0x000000000000000000000000FFFFFFFF
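/*
 * R5 and the 32-bit mask serve the "final 32-bit fold": the low 32 bits of
 * the remainder are multiplied by R5 and xored back into the upper bits,
 * leaving a 64-bit value for the Barrett reduction.
 */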
/*
 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
 *
 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL
 * #define CONSTANT_RU	0x1F7011641LL
 */
.Lconstant_RUpoly:
        .octa 0x00000001F701164100000001DB710641
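/*
 * Layout of .Lconstant_RUpoly: the low 64 bits hold the full bit-reversed
 * polynomial P' (0x1DB710641), the high 64 bits hold the Barrett constant
 * RU; both are consumed by the final 64 -> 32 bit reduction.
 */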
#define CONSTANT %xmm0
/**
 *	Calculate crc32
 *	BUF - buffer (16 bytes aligned)
 *	LEN - sizeof buffer (16 bytes aligned), LEN should be greater than 63
 *	CRC - initial crc32 value
 *
 *	u32 crc32_pclmul_le_16(u32 crc, const u8 *buffer, size_t len);
 */
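/*
 * A minimal caller sketch, not the actual glue code: it assumes some
 * byte-wise fallback (here crc32_le()) handles the short, unaligned or
 * trailing part of the buffer.
 *
 *	if (len >= 64 && !((unsigned long)buf & 15)) {
 *		size_t chunk = len & ~(size_t)15;	// 16-byte multiple
 *		crc = crc32_pclmul_le_16(crc, buf, chunk);
 *		buf += chunk;
 *		len -= chunk;
 *	}
 *	crc = crc32_le(crc, buf, len);
 */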
SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
        movdqa  (BUF), %xmm1
        movdqa  0x10(BUF), %xmm2
        movdqa  0x20(BUF), %xmm3
        movdqa  0x30(BUF), %xmm4

        movd    CRC, CONSTANT
        pxor    CONSTANT, %xmm1
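        /*
         * The first 64 bytes are now live in xmm1-xmm4 with the initial CRC
         * xored into the low 32 bits of xmm1; .Lloop_64 folds a further 64
         * bytes per iteration while at least that much input remains.
         */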
#ifdef __x86_64__
        movdqa .Lconstant_R2R1(%rip), CONSTANT
#else
        movdqa .Lconstant_R2R1, CONSTANT
#endif
.Lloop_64:/* 64 bytes Full cache line folding */
        prefetchnta 0x40(BUF)
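        /*
         * One fold step per accumulator: with D the 16 bytes of data 64
         * bytes ahead and A the accumulator, compute
         *	A = clmul(A.low64, R1) ^ clmul(A.high64, R2) ^ D
         * The copies kept in xmm5-xmm7 (and xmm8 on 64-bit) preserve the
         * high halves for the second set of multiplies.
         */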
        pclmulqdq $0x00, CONSTANT, %xmm1
        pclmulqdq $0x00, CONSTANT, %xmm2
        pclmulqdq $0x00, CONSTANT, %xmm3
#ifdef __x86_64__
        pclmulqdq $0x00, CONSTANT, %xmm4
#endif
        pclmulqdq $0x11, CONSTANT, %xmm5
        pclmulqdq $0x11, CONSTANT, %xmm6
        pclmulqdq $0x11, CONSTANT, %xmm7
#ifdef __x86_64__
        pclmulqdq $0x11, CONSTANT, %xmm8
#endif
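        /*
         * Immediate byte selection: $0x00 multiplies the low 64 bits of the
         * accumulator by R1 (CONSTANT's low qword), $0x11 multiplies the
         * high 64 bits by R2 (CONSTANT's high qword).
         */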
        /* xmm8 unsupported for x32 */
        pclmulqdq $0x00, CONSTANT, %xmm4
        pclmulqdq $0x11, CONSTANT, %xmm5

        pxor    (BUF), %xmm1
        pxor    0x10(BUF), %xmm2
        pxor    0x20(BUF), %xmm3
        pxor    0x30(BUF), %xmm4
.Lless_64:/* Folding cache line into 128bit */
#ifdef __x86_64__
        movdqa .Lconstant_R4R3(%rip), CONSTANT
#else
        movdqa .Lconstant_R4R3, CONSTANT
#endif
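        /*
         * Collapse xmm1-xmm4 into a single 128-bit remainder: each step
         * folds xmm1 by 128 bits with R4:R3 and xors in the next
         * accumulator.
         */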
        pclmulqdq $0x00, CONSTANT, %xmm1
        pclmulqdq $0x11, CONSTANT, %xmm5

        pclmulqdq $0x00, CONSTANT, %xmm1
        pclmulqdq $0x11, CONSTANT, %xmm5

        pclmulqdq $0x00, CONSTANT, %xmm1
        pclmulqdq $0x11, CONSTANT, %xmm5
.Lloop_16:/* Folding rest buffer into 128bit */
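        /*
         * Any remaining 16-byte blocks are folded into xmm1 one at a time,
         * again over a 128-bit distance with R4:R3, then xored with the
         * next block of input.
         */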
        pclmulqdq $0x00, CONSTANT, %xmm1
        pclmulqdq $0x11, CONSTANT, %xmm5
        /* perform the last 64 bit fold, also adds 32 zeroes
         * to the input stream */
        pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
        /* final 32-bit fold */
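        /*
         * Reduce the remainder to 64 bits: multiply its low 32 bits by R5
         * and xor the product with the remaining upper bits (xmm1 shifted
         * right by 4 bytes into xmm2).
         */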
#ifdef __x86_64__
        movdqa .Lconstant_R5(%rip), CONSTANT
        movdqa .Lconstant_mask32(%rip), %xmm3
#else
        movdqa .Lconstant_R5, CONSTANT
        movdqa .Lconstant_mask32, %xmm3
#endif
        pclmulqdq $0x00, CONSTANT, %xmm1
        /* Finish up with the bit-reversed Barrett reduction 64 ==> 32 bits */
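        /*
         * Classic bit-reflected Barrett reduction of the 64-bit value T
         * held in xmm1:
         *	T1  = clmul(T & 0xffffffff, RU)
         *	T2  = clmul(T1 & 0xffffffff, P')
         *	crc = (T ^ T2) >> 32
         * The pextrd $0x01 below extracts exactly those upper 32 bits.
         */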
#ifdef __x86_64__
        movdqa .Lconstant_RUpoly(%rip), CONSTANT
#else
        movdqa .Lconstant_RUpoly, CONSTANT
#endif
        pclmulqdq $0x10, CONSTANT, %xmm1
        pclmulqdq $0x00, CONSTANT, %xmm1
        pxor    %xmm2, %xmm1
        pextrd  $0x01, %xmm1, %eax
        RET
SYM_FUNC_END(crc32_pclmul_le_16)