/*
 * SSE2/SSSE3/AVX2-optimized routines to support checksumming of bytes.
 *
 * Copyright (C) 1996 Andrew Tridgell
 * Copyright (C) 1996 Paul Mackerras
 * Copyright (C) 2004-2020 Wayne Davison
 * Copyright (C) 2020 Jorrit Jongma
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, visit the http://fsf.org website.
 */
/*
 * Optimization target for get_checksum1() was the Intel Atom D2700, the
 * slowest CPU in the test set and the most likely to be CPU limited during
 * transfers. The combination of intrinsics was chosen specifically for the
 * most gain on that CPU; other combinations were occasionally slightly
 * faster on the others.
 *
 * While on more modern CPUs transfers are less likely to be CPU limited
 * (at least by this specific function), lower CPU usage is always better.
 * Improvements may still be seen when matching chunks from NVMe storage
 * even on newer CPUs.
 *
 * Benchmarks (in MB/s)            C    SSE2   SSSE3    AVX2
 * - Intel Atom D2700            550     750    1000     N/A
 * - Intel i7-7700hq            1850    2550    4050    6200
 * - AMD ThreadRipper 2950x     2900    5600    8950    8100
 *
 * Curiously the AMD is slower with AVX2 than SSSE3, while the Intel is
 * significantly faster. AVX2 is kept because it's more likely to relieve
 * the bottleneck on the slower CPU.
 *
 * This optimization for get_checksum1() is intentionally limited to x86-64
 * as no 32-bit CPU was available for testing. As 32-bit CPUs only have half
 * the available xmm registers, this optimized version may not be faster than
 * the pure C version anyway. Note that all x86-64 CPUs support at least SSE2.
 *
 * This file is compiled using GCC 4.8+'s C++ front end to allow the use of
 * the target attribute, selecting the fastest code path based on runtime
 * detection of CPU capabilities.
 */

#ifdef __x86_64__
#ifdef __cplusplus

#include "rsync.h"

#ifdef HAVE_SIMD

#include <immintrin.h>
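
/*
 * Illustrative only, kept out of the build: a minimal sketch (our own, not
 * part of the real code) of the GCC target-attribute function multiversioning
 * this file relies on. Defining several versions of one function plus a
 * mandatory "default" version makes the compiler emit a resolver that picks
 * the best supported version once, at load time, based on the CPU's features.
 */
#if 0
__attribute__ ((target("avx2"))) static int fmv_example(void) { return 2; }    // chosen on AVX2-capable CPUs
__attribute__ ((target("sse2"))) static int fmv_example(void) { return 1; }    // chosen when only SSE2 is present
__attribute__ ((target("default"))) static int fmv_example(void) { return 0; } // required fallback version
static int fmv_call(void) { return fmv_example(); }                            // ordinary call; dispatch is automatic
#endif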

/* Compatibility functions to let our SSSE3 algorithm run on SSE2 */
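/*
 * The SSSE3 intrinsics being emulated: _mm_hadds_epi16(a, b) horizontally adds
 * adjacent pairs of signed 16-bit elements from a and b with saturation, and
 * _mm_maddubs_epi16(a, b) multiplies unsigned bytes of a with signed bytes of b
 * and adds adjacent products into saturating signed 16-bit results. The
 * sse2-targeted versions below rebuild the same results from shifts, masks,
 * packs and 16-bit multiplies.
 */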

__attribute__ ((target("sse2"))) static inline __m128i sse_interleave_odd_epi16(__m128i a, __m128i b)
{
    return _mm_packs_epi32(
        _mm_srai_epi32(a, 16),
        _mm_srai_epi32(b, 16)
    );
}

__attribute__ ((target("sse2"))) static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b)
{
    return sse_interleave_odd_epi16(
        _mm_slli_si128(a, 2),
        _mm_slli_si128(b, 2)
    );
}

__attribute__ ((target("sse2"))) static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b)
{
    return _mm_mullo_epi16(
        _mm_srli_epi16(a, 8),
        _mm_srai_epi16(b, 8)
    );
}

__attribute__ ((target("sse2"))) static inline __m128i sse_mulu_even_epi8(__m128i a, __m128i b)
{
    return _mm_mullo_epi16(
        _mm_and_si128(a, _mm_set1_epi16(0xFF)),
        _mm_srai_epi16(_mm_slli_si128(b, 1), 8)
    );
}

__attribute__ ((target("sse2"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b)
{
    return _mm_adds_epi16(
        sse_interleave_even_epi16(a, b),
        sse_interleave_odd_epi16(a, b)
    );
}

__attribute__ ((target("ssse3"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b)
{
    return _mm_hadds_epi16(a, b);
}

__attribute__ ((target("sse2"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b)
{
    return _mm_adds_epi16(
        sse_mulu_even_epi8(a, b),
        sse_mulu_odd_epi8(a, b)
    );
}

__attribute__ ((target("ssse3"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b)
{
    return _mm_maddubs_epi16(a, b);
}

/* These don't actually get called, but we need to define them. */
__attribute__ ((target("default"))) static inline __m128i sse_interleave_odd_epi16(__m128i a, __m128i b) { return a; }
__attribute__ ((target("default"))) static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b) { return a; }
__attribute__ ((target("default"))) static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b) { return a; }
__attribute__ ((target("default"))) static inline __m128i sse_mulu_even_epi8(__m128i a, __m128i b) { return a; }
__attribute__ ((target("default"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b) { return a; }
__attribute__ ((target("default"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b) { return a; }

/*
  Original loop per 4 bytes:
    s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] + 10*CHAR_OFFSET;
    s1 += buf[i] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET;

  SSE2/SSSE3 loop per 32 bytes:
    int16 t1[8];
    int16 t2[8];
    for (int j = 0; j < 8; j++) {
      t1[j] = buf[j*4 + i] + buf[j*4 + i+1] + buf[j*4 + i+2] + buf[j*4 + i+3];
      t2[j] = 4*buf[j*4 + i] + 3*buf[j*4 + i+1] + 2*buf[j*4 + i+2] + buf[j*4 + i+3];
    }
    s2 += 32*s1 + (uint32)(
        28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6] +
        t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7]
    ) + 528*CHAR_OFFSET;
    s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]) +
        32*CHAR_OFFSET;
 */
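
/*
 * Illustrative only, kept out of the build: the 32-byte step above restated as
 * plain C so it can be diffed against the vector code. The helper name
 * (checksum1_step32_ref) is ours, not part of the real code.
 */
#if 0
static void checksum1_step32_ref(schar* buf, int32 i, uint32* ps1, uint32* ps2)
{
    uint32 s1 = *ps1, s2 = *ps2;
    int16 t1[8], t2[8];
    for (int j = 0; j < 8; j++) {
        t1[j] = buf[j*4 + i] + buf[j*4 + i+1] + buf[j*4 + i+2] + buf[j*4 + i+3];
        t2[j] = 4*buf[j*4 + i] + 3*buf[j*4 + i+1] + 2*buf[j*4 + i+2] + buf[j*4 + i+3];
    }
    s2 += 32*s1 + (uint32)(
        28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6] +
        t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7]
    ) + 528*CHAR_OFFSET;    // 528 = 32*33/2 CHAR_OFFSETs accumulated per 32-byte block
    s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]) + 32*CHAR_OFFSET;
    *ps1 = s1;
    *ps2 = s2;
}
#endif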

/*
  Both sse2 and ssse3 targets must be specified here or we lose (a lot of)
  performance, possibly due to not unrolling+inlining the called targeted
  functions.
 */
__attribute__ ((target("sse2", "ssse3"))) static int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
{
    if (len > 32) {
        int aligned = ((uintptr_t)buf & 15) == 0;

        uint32 x[4] = {0};
        x[0] = *ps1;
        __m128i ss1 = _mm_loadu_si128((__m128i_u*)x);
        x[0] = *ps2;
        __m128i ss2 = _mm_loadu_si128((__m128i_u*)x);

        const int16 mul_t1_buf[8] = {28, 24, 20, 16, 12, 8, 4, 0};
        __m128i mul_t1 = _mm_loadu_si128((__m128i_u*)mul_t1_buf);

        for (; i < (len-32); i+=32) {
            // Load ... 2*[int8*16]
            // SSSE3 has _mm_lddqu_si128, but using it would require a separate
            // target function for the SSE2 and SSSE3 loads. For reasons
            // unknown (to me) we lose about 10% performance on some CPUs if
            // we do that right here. We just use _mm_loadu_si128, as for all
            // but a handful of specific old CPUs the two are synonymous, and
            // take the 1-5% hit on the few CPUs where they aren't.
            __m128i in8_1, in8_2;
            if (!aligned) {
                in8_1 = _mm_loadu_si128((__m128i_u*)&buf[i]);
                in8_2 = _mm_loadu_si128((__m128i_u*)&buf[i + 16]);
            } else {
                in8_1 = _mm_load_si128((__m128i_u*)&buf[i]);
                in8_2 = _mm_load_si128((__m128i_u*)&buf[i + 16]);
            }

            // (1*buf[i] + 1*buf[i+1]), (1*buf[i+2] + 1*buf[i+3]), ... 2*[int16*8]
            // Fastest option, even though it multiplies by 1
            __m128i mul_one = _mm_set1_epi8(1);
            __m128i add16_1 = sse_maddubs_epi16(mul_one, in8_1);
            __m128i add16_2 = sse_maddubs_epi16(mul_one, in8_2);

            // (4*buf[i] + 3*buf[i+1]), (2*buf[i+2] + buf[i+3]), ... 2*[int16*8]
            __m128i mul_const = _mm_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
            __m128i mul_add16_1 = sse_maddubs_epi16(mul_const, in8_1);
            __m128i mul_add16_2 = sse_maddubs_epi16(mul_const, in8_2);

            // s2 += 32*s1
            ss2 = _mm_add_epi32(ss2, _mm_slli_epi32(ss1, 5));

            // [sum(t1[0]..t1[7]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
            // Shifting left, then shifting right again and shuffling (rather than just
            // shifting right as with mul32 below) to cheaply end up with the correct sign
            // extension as we go from int16 to int32.
            __m128i sum_add32 = _mm_add_epi16(add16_1, add16_2);
            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 2));
            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 4));
            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 8));
            sum_add32 = _mm_srai_epi32(sum_add32, 16);
            sum_add32 = _mm_shuffle_epi32(sum_add32, 3);
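
            // Worked example of the reduction above: with 16-bit lanes
            // x[0..7], adding the vector shifted left by 2, 4 and then 8 bytes
            // leaves lane k holding x[k-1]+x[k], then x[k-3]+..+x[k], then
            // x[k-7]+..+x[k], so lane 7 (the high half of 32-bit lane 3) ends
            // up with the full sum. _mm_srai_epi32(.., 16) sign-extends that
            // into 32-bit lane 3 and _mm_shuffle_epi32(.., 3) copies it down
            // to lane 0, which is the only lane read back at the end.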

            // [sum(t2[0]..t2[7]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
            __m128i sum_mul_add32 = _mm_add_epi16(mul_add16_1, mul_add16_2);
            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 2));
            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 4));
            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 8));
            sum_mul_add32 = _mm_srai_epi32(sum_mul_add32, 16);
            sum_mul_add32 = _mm_shuffle_epi32(sum_mul_add32, 3);

            // s1 += t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]
            ss1 = _mm_add_epi32(ss1, sum_add32);

            // s2 += t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7]
            ss2 = _mm_add_epi32(ss2, sum_mul_add32);

            // [t1[0] + t1[1], t1[2] + t1[3], ...] [int16*8]
            // We could've combined this with generating sum_add32 above and
            // saved an instruction, but benchmarking shows that to be slower.
            __m128i add16 = sse_hadds_epi16(add16_1, add16_2);

            // [t1[0], t1[1], ...] -> [t1[0]*28 + t1[1]*24, ...] [int32*4]
            __m128i mul32 = _mm_madd_epi16(add16, mul_t1);

            // [sum(mul32), X, X, X] [int32*4]; faster than multiple _mm_hadd_epi32
            mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 4));
            mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 8));

            // s2 += 28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6]
            ss2 = _mm_add_epi32(ss2, mul32);

            // s1 += 32*CHAR_OFFSET
            __m128i char_offset_multiplier = _mm_set1_epi32(32 * CHAR_OFFSET);
            ss1 = _mm_add_epi32(ss1, char_offset_multiplier);

            // s2 += 528*CHAR_OFFSET
            char_offset_multiplier = _mm_set1_epi32(528 * CHAR_OFFSET);
            ss2 = _mm_add_epi32(ss2, char_offset_multiplier);
        }

        _mm_store_si128((__m128i_u*)x, ss1);
        *ps1 = x[0];
        _mm_store_si128((__m128i_u*)x, ss2);
        *ps2 = x[0];
    }
    return i;
}

/*
  AVX2 loop per 64 bytes:
    int16 t1[16];
    int16 t2[16];
    for (int j = 0; j < 16; j++) {
      t1[j] = buf[j*4 + i] + buf[j*4 + i+1] + buf[j*4 + i+2] + buf[j*4 + i+3];
      t2[j] = 4*buf[j*4 + i] + 3*buf[j*4 + i+1] + 2*buf[j*4 + i+2] + buf[j*4 + i+3];
    }
    s2 += 64*s1 + (uint32)(
        60*t1[0] + 56*t1[1] + 52*t1[2] + 48*t1[3] + 44*t1[4] + 40*t1[5] + 36*t1[6] + 32*t1[7] +
        28*t1[8] + 24*t1[9] + 20*t1[10] + 16*t1[11] + 12*t1[12] + 8*t1[13] + 4*t1[14] +
        t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7] +
        t2[8] + t2[9] + t2[10] + t2[11] + t2[12] + t2[13] + t2[14] + t2[15]
    ) + 2080*CHAR_OFFSET;
    s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7] +
        t1[8] + t1[9] + t1[10] + t1[11] + t1[12] + t1[13] + t1[14] + t1[15]) +
        64*CHAR_OFFSET;
 */
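
/*
  Note on the CHAR_OFFSET constants: every byte adds one CHAR_OFFSET to s1, and
  the offset added at byte k of an N-byte block is re-added into s2 on each of
  the remaining N-k+1 bytes, so s2 collects N*(N+1)/2 of them per block:
  10 for the 4-byte loop, 528 for the 32-byte loop and 2080 for the 64-byte loop.
 */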
__attribute__ ((target("avx2"))) static int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
{
    if (len > 64) {
        // Instructions reshuffled compared to SSE2 for slightly better performance
        int aligned = ((uintptr_t)buf & 31) == 0;

        uint32 x[8] = {0};
        x[0] = *ps1;
        __m256i ss1 = _mm256_lddqu_si256((__m256i_u*)x);
        x[0] = *ps2;
        __m256i ss2 = _mm256_lddqu_si256((__m256i_u*)x);

        // The order gets shuffled compared to SSE2
        const int16 mul_t1_buf[16] = {60, 56, 52, 48, 28, 24, 20, 16, 44, 40, 36, 32, 12, 8, 4, 0};
        __m256i mul_t1 = _mm256_lddqu_si256((__m256i_u*)mul_t1_buf);

        for (; i < (len-64); i+=64) {
            // Load ... 2*[int8*32]
            __m256i in8_1, in8_2;
            if (!aligned) {
                in8_1 = _mm256_lddqu_si256((__m256i_u*)&buf[i]);
                in8_2 = _mm256_lddqu_si256((__m256i_u*)&buf[i + 32]);
            } else {
                in8_1 = _mm256_load_si256((__m256i_u*)&buf[i]);
                in8_2 = _mm256_load_si256((__m256i_u*)&buf[i + 32]);
            }

            // Prefetch for the next loops. This has no observable effect on the
            // tested AMD but makes as much as 20% difference on the Intel.
            // Curiously that same Intel sees no benefit from this in the SSE2
            // code path.
            _mm_prefetch(&buf[i + 64], _MM_HINT_T0);
            _mm_prefetch(&buf[i + 96], _MM_HINT_T0);
            _mm_prefetch(&buf[i + 128], _MM_HINT_T0);
            _mm_prefetch(&buf[i + 160], _MM_HINT_T0);

            // (1*buf[i] + 1*buf[i+1]), (1*buf[i+2] + 1*buf[i+3]), ... 2*[int16*16]
            // Fastest option, even though it multiplies by 1
            __m256i mul_one = _mm256_set1_epi8(1);
            __m256i add16_1 = _mm256_maddubs_epi16(mul_one, in8_1);
            __m256i add16_2 = _mm256_maddubs_epi16(mul_one, in8_2);

            // (4*buf[i] + 3*buf[i+1]), (2*buf[i+2] + buf[i+3]), ... 2*[int16*16]
            __m256i mul_const = _mm256_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
            __m256i mul_add16_1 = _mm256_maddubs_epi16(mul_const, in8_1);
            __m256i mul_add16_2 = _mm256_maddubs_epi16(mul_const, in8_2);

            // s2 += 64*s1
            ss2 = _mm256_add_epi32(ss2, _mm256_slli_epi32(ss1, 6));

            // [t1[0] + t1[1], t1[2] + t1[3], ...] [int16*16]
            __m256i add16 = _mm256_hadds_epi16(add16_1, add16_2);

            // [t1[0], t1[1], ...] -> [t1[0]*60 + t1[1]*56, ...] [int32*8]
            __m256i mul32 = _mm256_madd_epi16(add16, mul_t1);

            // [sum(t1[0]..t1[15]), X, X, X, X, X, X, X] [int32*8]
            __m256i sum_add32 = _mm256_add_epi16(add16_1, add16_2);
            sum_add32 = _mm256_add_epi16(sum_add32, _mm256_permute4x64_epi64(sum_add32, 2 + (3 << 2) + (0 << 4) + (1 << 6)));
            sum_add32 = _mm256_add_epi16(sum_add32, _mm256_slli_si256(sum_add32, 2));
            sum_add32 = _mm256_add_epi16(sum_add32, _mm256_slli_si256(sum_add32, 4));
            sum_add32 = _mm256_add_epi16(sum_add32, _mm256_slli_si256(sum_add32, 8));
            sum_add32 = _mm256_srai_epi32(sum_add32, 16);
            sum_add32 = _mm256_shuffle_epi32(sum_add32, 3);

            // s1 += t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7] + t1[8] + t1[9] + t1[10] + t1[11] + t1[12] + t1[13] + t1[14] + t1[15]
            ss1 = _mm256_add_epi32(ss1, sum_add32);
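
            // Note: _mm256_slli_si256 only shifts within each 128-bit lane, so
            // the _mm256_permute4x64_epi64 step above (and in the t2 reduction
            // below) first swaps the two 128-bit halves and adds them together;
            // the shift/add cascade then reduces within a lane exactly as in
            // the SSE2 version.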

            // [sum(t2[0]..t2[15]), X, X, X, X, X, X, X] [int32*8]
            __m256i sum_mul_add32 = _mm256_add_epi16(mul_add16_1, mul_add16_2);
            sum_mul_add32 = _mm256_add_epi16(sum_mul_add32, _mm256_permute4x64_epi64(sum_mul_add32, 2 + (3 << 2) + (0 << 4) + (1 << 6)));
            sum_mul_add32 = _mm256_add_epi16(sum_mul_add32, _mm256_slli_si256(sum_mul_add32, 2));
            sum_mul_add32 = _mm256_add_epi16(sum_mul_add32, _mm256_slli_si256(sum_mul_add32, 4));
            sum_mul_add32 = _mm256_add_epi16(sum_mul_add32, _mm256_slli_si256(sum_mul_add32, 8));
            sum_mul_add32 = _mm256_srai_epi32(sum_mul_add32, 16);
            sum_mul_add32 = _mm256_shuffle_epi32(sum_mul_add32, 3);

            // s2 += t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7] + t2[8] + t2[9] + t2[10] + t2[11] + t2[12] + t2[13] + t2[14] + t2[15]
            ss2 = _mm256_add_epi32(ss2, sum_mul_add32);

            // [sum(mul32), X, X, X, X, X, X, X] [int32*8]
            mul32 = _mm256_add_epi32(mul32, _mm256_permute2x128_si256(mul32, mul32, 1));
            mul32 = _mm256_add_epi32(mul32, _mm256_srli_si256(mul32, 4));
            mul32 = _mm256_add_epi32(mul32, _mm256_srli_si256(mul32, 8));

            // s2 += 60*t1[0] + 56*t1[1] + 52*t1[2] + 48*t1[3] + 44*t1[4] + 40*t1[5] + 36*t1[6] + 32*t1[7] + 28*t1[8] + 24*t1[9] + 20*t1[10] + 16*t1[11] + 12*t1[12] + 8*t1[13] + 4*t1[14]
            ss2 = _mm256_add_epi32(ss2, mul32);

            // s1 += 64*CHAR_OFFSET
            __m256i char_offset_multiplier = _mm256_set1_epi32(64 * CHAR_OFFSET);
            ss1 = _mm256_add_epi32(ss1, char_offset_multiplier);

            // s2 += 2080*CHAR_OFFSET
            char_offset_multiplier = _mm256_set1_epi32(2080 * CHAR_OFFSET);
            ss2 = _mm256_add_epi32(ss2, char_offset_multiplier);
        }

        _mm256_store_si256((__m256i_u*)x, ss1);
        *ps1 = x[0];
        _mm256_store_si256((__m256i_u*)x, ss2);
        *ps2 = x[0];
    }
    return i;
}

__attribute__ ((target("default"))) static int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
{
    // No-op fallback: leave the state untouched and let the scalar code run.
    return i;
}

__attribute__ ((target("default"))) static int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
{
    // No-op fallback: leave the state untouched and let the scalar code run.
    return i;
}

static inline int32 get_checksum1_default_1(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
{
    uint32 s1 = *ps1;
    uint32 s2 = *ps2;
    for (; i < (len-4); i+=4) {
        s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] + 10*CHAR_OFFSET;
        s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
    }
    for (; i < len; i++) {
        s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
    }
    *ps1 = s1;
    *ps2 = s2;
    return i;
}

uint32 get_checksum1(char *buf1, int32 len)
{
    int32 i = 0;
    uint32 s1 = 0;
    uint32 s2 = 0;

    // multiples of 64 bytes using AVX2 (if available)
    i = get_checksum1_avx2_64((schar*)buf1, len, i, &s1, &s2);

    // multiples of 32 bytes using SSE2/SSSE3 (if available)
    i = get_checksum1_sse2_32((schar*)buf1, len, i, &s1, &s2);

    // whatever is left over, in plain C
    i = get_checksum1_default_1((schar*)buf1, len, i, &s1, &s2);

    return (s1 & 0xffff) + (s2 << 16);
}
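
/*
 * Illustrative only, kept out of the build: a minimal sketch of how the SIMD
 * dispatch can be cross-checked against the plain C path. The helper name
 * (checksum1_selftest) and the test pattern are ours, not part of the real
 * code; the sketch relies only on the functions defined above.
 */
#if 0
static int checksum1_selftest(void)
{
    schar buf[256];
    for (int j = 0; j < 256; j++)
        buf[j] = (schar)(j * 7 + 1);

    // Reference result computed with the scalar loop only.
    uint32 s1 = 0, s2 = 0;
    (void)get_checksum1_default_1(buf, 256, 0, &s1, &s2);
    uint32 expected = (s1 & 0xffff) + (s2 << 16);

    // get_checksum1() picks AVX2/SSSE3/SSE2 at runtime but must match.
    return get_checksum1((char *)buf, 256) == expected;
}
#endif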

#endif /* HAVE_SIMD */
#endif /* __cplusplus */
#endif /* __x86_64__ */