/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * xxHash - Extremely Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet.
 *
 * You can contact the author at:
 * - xxHash homepage: http://cyan4973.github.io/xxHash/
 * - xxHash source repository: https://github.com/Cyan4973/xxHash
 */

#include <arch/byteorder.h>
/*
 * Assumed supporting headers: <errno.h> for -EINVAL, <stdint.h> for the
 * fixed-width types, and <string.h> for memcpy()/memset(). The xxh32_state
 * and xxh64_state structures and the public prototypes are expected to come
 * from the project's xxhash header.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>

/*-*************************************
 * Macros
 **************************************/
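/*
 * Note on the rotates (added commentary): both macros require 0 < r < word
 * width, since r == 0 would shift by the full width, which is undefined
 * behavior in C. Every call site below passes a constant in the valid range.
 */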
#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r)))
#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r)))

/*-*************************************
 * Constants
 **************************************/
static const uint32_t PRIME32_1 = 2654435761U;
static const uint32_t PRIME32_2 = 2246822519U;
static const uint32_t PRIME32_3 = 3266489917U;
static const uint32_t PRIME32_4 = 668265263U;
static const uint32_t PRIME32_5 = 374761393U;

static const uint64_t PRIME64_1 = 11400714785074694791ULL;
static const uint64_t PRIME64_2 = 14029467366897019727ULL;
static const uint64_t PRIME64_3 = 1609587929392839161ULL;
static const uint64_t PRIME64_4 = 9650029242287828579ULL;
static const uint64_t PRIME64_5 = 2870177450012600261ULL;
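/*
 * These primes are fixed by the xxHash specification; different values would
 * still hash, but would no longer produce standard xxHash digests.
 */
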
/*-**************************
 * Utils
 ***************************/
void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
{
	memcpy(dst, src, sizeof(*dst));
}

void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
{
	memcpy(dst, src, sizeof(*dst));
}

static uint32_t xxh_get_unaligned_le32(const void *p)
{
	const uint32_t *p32 = (const uint32_t *)p;

	/* plain load; assumes a little-endian target, otherwise the
	 * byte-swap helpers from <arch/byteorder.h> would be needed here
	 */
	return *p32;
}

static uint64_t xxh_get_unaligned_le64(const void *p)
{
	const uint64_t *p64 = (const uint64_t *)p;

	/* plain load; same little-endian assumption as above */
	return *p64;
}

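/*
 * Portability note (added commentary): the casts above also assume the CPU
 * tolerates unaligned loads. A strictly conforming alternative is to copy
 * through memcpy(), e.g.:
 *
 *	uint32_t v;
 *
 *	memcpy(&v, p, sizeof(v));
 *	return v;
 */
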
/*-***************************
 * Simple Hash Functions
 ****************************/
static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
{
	seed += input * PRIME32_2;
	seed = xxh_rotl32(seed, 13);
	seed *= PRIME32_1;
	return seed;
}

uint32_t xxh32(const void *input, const size_t len, const uint32_t seed)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *b_end = p + len;
	uint32_t h32;

	if (len >= 16) {
		const uint8_t *const limit = b_end - 16;
		uint32_t v1 = seed + PRIME32_1 + PRIME32_2;
		uint32_t v2 = seed + PRIME32_2;
		uint32_t v3 = seed + 0;
		uint32_t v4 = seed - PRIME32_1;

		do {
			v1 = xxh32_round(v1, xxh_get_unaligned_le32(p));
			p += 4;
			v2 = xxh32_round(v2, xxh_get_unaligned_le32(p));
			p += 4;
			v3 = xxh32_round(v3, xxh_get_unaligned_le32(p));
			p += 4;
			v4 = xxh32_round(v4, xxh_get_unaligned_le32(p));
			p += 4;
		} while (p <= limit);

		h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +
		      xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);
	} else {
		h32 = seed + PRIME32_5;
	}

	h32 += (uint32_t)len;

	while (p + 4 <= b_end) {
		h32 += xxh_get_unaligned_le32(p) * PRIME32_3;
		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
		p += 4;
	}

	while (p < b_end) {
		h32 += (*p) * PRIME32_5;
		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
		p++;
	}

	/* final avalanche: spread the accumulated state across all bits */
	h32 ^= h32 >> 15;
	h32 *= PRIME32_2;
	h32 ^= h32 >> 13;
	h32 *= PRIME32_3;
	h32 ^= h32 >> 16;

	return h32;
}

static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
{
	acc += input * PRIME64_2;
	acc = xxh_rotl64(acc, 31);
	acc *= PRIME64_1;
	return acc;
}

static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
{
	val = xxh64_round(0, val);
	acc ^= val;
	acc = acc * PRIME64_1 + PRIME64_4;
	return acc;
}

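/*
 * Added note: unlike xxh32, the 64-bit variant folds each lane accumulator
 * (v1..v4) back into the combined hash with a merge round, so no lane's
 * entropy is lost after the rotate-and-add convergence step.
 */
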
uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;
	uint64_t h64;

	if (len >= 32) {
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
		uint64_t v2 = seed + PRIME64_2;
		uint64_t v3 = seed + 0;
		uint64_t v4 = seed - PRIME64_1;

		do {
			v1 = xxh64_round(v1, xxh_get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, xxh_get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, xxh_get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, xxh_get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
		      xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
		h64 = xxh64_merge_round(h64, v1);
		h64 = xxh64_merge_round(h64, v2);
		h64 = xxh64_merge_round(h64, v3);
		h64 = xxh64_merge_round(h64, v4);
	} else {
		h64 = seed + PRIME64_5;
	}

	h64 += (uint64_t)len;

	while (p + 8 <= b_end) {
		const uint64_t k1 = xxh64_round(0, xxh_get_unaligned_le64(p));

		h64 ^= k1;
		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
		p += 8;
	}

	if (p + 4 <= b_end) {
		h64 ^= (uint64_t)(xxh_get_unaligned_le32(p)) * PRIME64_1;
		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
		p += 4;
	}

	while (p < b_end) {
		h64 ^= (*p) * PRIME64_5;
		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
		p++;
	}

	/* final avalanche: spread the accumulated state across all bits */
	h64 ^= h64 >> 33;
	h64 *= PRIME64_2;
	h64 ^= h64 >> 29;
	h64 *= PRIME64_3;
	h64 ^= h64 >> 32;

	return h64;
}

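/*
 * Usage sketch (added, not from the original): one-shot hashing.
 *
 *	static const char msg[] = "xxhash";
 *	uint32_t h32 = xxh32(msg, sizeof(msg) - 1, 0);
 *	uint64_t h64 = xxh64(msg, sizeof(msg) - 1, 0);
 *
 * xxh64() is usually faster on 64-bit machines and xxh32() on 32-bit ones;
 * both read their input as little-endian, so digests match across hosts.
 */
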
/*-**************************************************
 * Advanced Hash Functions
 ***************************************************/
void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
{
	/* use a local state for memcpy() to avoid strict-aliasing warnings */
	struct xxh32_state state;

	memset(&state, 0, sizeof(state));
	state.v1 = seed + PRIME32_1 + PRIME32_2;
	state.v2 = seed + PRIME32_2;
	state.v3 = seed + 0;
	state.v4 = seed - PRIME32_1;
	memcpy(statePtr, &state, sizeof(state));
}

void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
{
	/* use a local state for memcpy() to avoid strict-aliasing warnings */
	struct xxh64_state state;

	memset(&state, 0, sizeof(state));
	state.v1 = seed + PRIME64_1 + PRIME64_2;
	state.v2 = seed + PRIME64_2;
	state.v3 = seed + 0;
	state.v4 = seed - PRIME64_1;
	memcpy(statePtr, &state, sizeof(state));
}

int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -EINVAL;

	state->total_len_32 += (uint32_t)len;
	state->large_len |= (len >= 16) | (state->total_len_32 >= 16);

	if (state->memsize + len < 16) { /* fill in tmp buffer */
		memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) { /* some data left from previous update */
		const uint32_t *p32 = state->mem32;

		memcpy((uint8_t *)(state->mem32) + state->memsize, input,
		       16 - state->memsize);

		state->v1 = xxh32_round(state->v1, xxh_get_unaligned_le32(p32));
		p32++;
		state->v2 = xxh32_round(state->v2, xxh_get_unaligned_le32(p32));
		p32++;
		state->v3 = xxh32_round(state->v3, xxh_get_unaligned_le32(p32));
		p32++;
		state->v4 = xxh32_round(state->v4, xxh_get_unaligned_le32(p32));

		p += 16 - state->memsize;
		state->memsize = 0;
	}

	if (p <= b_end - 16) {
		const uint8_t *const limit = b_end - 16;
		uint32_t v1 = state->v1;
		uint32_t v2 = state->v2;
		uint32_t v3 = state->v3;
		uint32_t v4 = state->v4;

		do {
			v1 = xxh32_round(v1, xxh_get_unaligned_le32(p));
			p += 4;
			v2 = xxh32_round(v2, xxh_get_unaligned_le32(p));
			p += 4;
			v3 = xxh32_round(v3, xxh_get_unaligned_le32(p));
			p += 4;
			v4 = xxh32_round(v4, xxh_get_unaligned_le32(p));
			p += 4;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) { /* stash the tail for the next update/digest */
		memcpy(state->mem32, p, (size_t)(b_end - p));
		state->memsize = (uint32_t)(b_end - p);
	}

	return 0;
}

uint32_t xxh32_digest(const struct xxh32_state *state)
{
	const uint8_t *p = (const uint8_t *)state->mem32;
	const uint8_t *const b_end = (const uint8_t *)(state->mem32) +
		state->memsize;
	uint32_t h32;

	if (state->large_len) {
		h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +
		      xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);
	} else {
		h32 = state->v3 /* == seed */ + PRIME32_5;
	}

	h32 += state->total_len_32;

	while (p + 4 <= b_end) {
		h32 += xxh_get_unaligned_le32(p) * PRIME32_3;
		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
		p += 4;
	}

	while (p < b_end) {
		h32 += (*p) * PRIME32_5;
		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
		p++;
	}

	h32 ^= h32 >> 15;
	h32 *= PRIME32_2;
	h32 ^= h32 >> 13;
	h32 *= PRIME32_3;
	h32 ^= h32 >> 16;

	return h32;
}

int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -EINVAL;

	state->total_len += len;

	if (state->memsize + len < 32) { /* fill in tmp buffer */
		memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) { /* tmp buffer is full */
		uint64_t *p64 = state->mem64;

		memcpy(((uint8_t *)p64) + state->memsize, input,
		       32 - state->memsize);

		state->v1 = xxh64_round(state->v1, xxh_get_unaligned_le64(p64));
		p64++;
		state->v2 = xxh64_round(state->v2, xxh_get_unaligned_le64(p64));
		p64++;
		state->v3 = xxh64_round(state->v3, xxh_get_unaligned_le64(p64));
		p64++;
		state->v4 = xxh64_round(state->v4, xxh_get_unaligned_le64(p64));

		p += 32 - state->memsize;
		state->memsize = 0;
	}

	if (p + 32 <= b_end) {
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = state->v1;
		uint64_t v2 = state->v2;
		uint64_t v3 = state->v3;
		uint64_t v4 = state->v4;

		do {
			v1 = xxh64_round(v1, xxh_get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, xxh_get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, xxh_get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, xxh_get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) { /* stash the tail for the next update/digest */
		memcpy(state->mem64, p, (size_t)(b_end - p));
		state->memsize = (uint32_t)(b_end - p);
	}

	return 0;
}

uint64_t xxh64_digest(const struct xxh64_state *state)
{
	const uint8_t *p = (const uint8_t *)state->mem64;
	const uint8_t *const b_end = (const uint8_t *)state->mem64 +
		state->memsize;
	uint64_t h64;

	if (state->total_len >= 32) {
		const uint64_t v1 = state->v1;
		const uint64_t v2 = state->v2;
		const uint64_t v3 = state->v3;
		const uint64_t v4 = state->v4;

		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
		      xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
		h64 = xxh64_merge_round(h64, v1);
		h64 = xxh64_merge_round(h64, v2);
		h64 = xxh64_merge_round(h64, v3);
		h64 = xxh64_merge_round(h64, v4);
	} else {
		h64 = state->v3 + PRIME64_5;
	}

	h64 += (uint64_t)state->total_len;

	while (p + 8 <= b_end) {
		const uint64_t k1 = xxh64_round(0, xxh_get_unaligned_le64(p));

		h64 ^= k1;
		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
		p += 8;
	}

	if (p + 4 <= b_end) {
		h64 ^= (uint64_t)(xxh_get_unaligned_le32(p)) * PRIME64_1;
		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
		p += 4;
	}

	while (p < b_end) {
		h64 ^= (*p) * PRIME64_5;
		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
		p++;
	}

	h64 ^= h64 >> 33;
	h64 *= PRIME64_2;
	h64 ^= h64 >> 29;
	h64 *= PRIME64_3;
	h64 ^= h64 >> 32;

	return h64;
}
