/* SPDX-License-Identifier: BSD-2-Clause */

/*
 * xxHash - Extremely Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet.
 *
 * You can contact the author at:
 * - xxHash homepage: http://cyan4973.github.io/xxHash/
 * - xxHash source repository: https://github.com/Cyan4973/xxHash
 */

#include <arch/byteorder.h>
#include <endian.h>
#include <string.h>
#include <xxhash.h>

/*-*************************************
 * Macros
 **************************************/
#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r)))
#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r)))

/*-*************************************
 * Constants
 **************************************/
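/*
 * The PRIME32_n and PRIME64_n values below are the standard mixing
 * constants from the xxHash reference implementation: large odd primes
 * whose multiplications, together with the rotations above, give the
 * hash its avalanche behaviour.
 */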
static const uint32_t PRIME32_1 = 2654435761U;
static const uint32_t PRIME32_2 = 2246822519U;
static const uint32_t PRIME32_3 = 3266489917U;
static const uint32_t PRIME32_4 = 668265263U;
static const uint32_t PRIME32_5 = 374761393U;

static const uint64_t PRIME64_1 = 11400714785074694791ULL;
static const uint64_t PRIME64_2 = 14029467366897019727ULL;
static const uint64_t PRIME64_3 = 1609587929392839161ULL;
static const uint64_t PRIME64_4 = 9650029242287828579ULL;
static const uint64_t PRIME64_5 = 2870177450012600261ULL;

/*-**************************
 * Utils
 ***************************/
void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
{
	memcpy(dst, src, sizeof(*dst));
}

void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
{
	memcpy(dst, src, sizeof(*dst));
}

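/*
 * Little-endian loads used by the hash cores below. Note that these
 * dereference the (possibly unaligned) pointer directly and only byte-swap
 * via le32toh()/le64toh(), so they assume the target handles unaligned
 * accesses.
 */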
static uint32_t xxh_get_unaligned_le32(const void *p)
{
	const uint32_t *p32 = (const uint32_t *)p;
	return le32toh(*p32);
}

static uint64_t xxh_get_unaligned_le64(const void *p)
{
	const uint64_t *p64 = (const uint64_t *)p;
	return le64toh(*p64);
}

/*-***************************
 * Simple Hash Functions
 ****************************/
static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
{
	seed += input * PRIME32_2;
	seed = xxh_rotl32(seed, 13);
	seed *= PRIME32_1;
	return seed;
}

uint32_t xxh32(const void *input, const size_t len, const uint32_t seed)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *b_end = p + len;
	uint32_t h32;

	if (len >= 16) {
		const uint8_t *const limit = b_end - 16;
		uint32_t v1 = seed + PRIME32_1 + PRIME32_2;
		uint32_t v2 = seed + PRIME32_2;
		uint32_t v3 = seed + 0;
		uint32_t v4 = seed - PRIME32_1;

		do {
			v1 = xxh32_round(v1, xxh_get_unaligned_le32(p));
			p += 4;
			v2 = xxh32_round(v2, xxh_get_unaligned_le32(p));
			p += 4;
			v3 = xxh32_round(v3, xxh_get_unaligned_le32(p));
			p += 4;
			v4 = xxh32_round(v4, xxh_get_unaligned_le32(p));
			p += 4;
		} while (p <= limit);

		h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +
			xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);
	} else {
		h32 = seed + PRIME32_5;
	}

	h32 += (uint32_t)len;

	while (p + 4 <= b_end) {
		h32 += xxh_get_unaligned_le32(p) * PRIME32_3;
		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
		p += 4;
	}

	while (p < b_end) {
		h32 += (*p) * PRIME32_5;
		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
		p++;
	}

	h32 ^= h32 >> 15;
	h32 *= PRIME32_2;
	h32 ^= h32 >> 13;
	h32 *= PRIME32_3;
	h32 ^= h32 >> 16;

	return h32;
}

static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
{
	acc += input * PRIME64_2;
	acc = xxh_rotl64(acc, 31);
	acc *= PRIME64_1;
	return acc;
}

static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
{
	val = xxh64_round(0, val);
	acc ^= val;
	acc = acc * PRIME64_1 + PRIME64_4;
	return acc;
}

uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;
	uint64_t h64;

	if (len >= 32) {
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
		uint64_t v2 = seed + PRIME64_2;
		uint64_t v3 = seed + 0;
		uint64_t v4 = seed - PRIME64_1;

		do {
			v1 = xxh64_round(v1, xxh_get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, xxh_get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, xxh_get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, xxh_get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
		h64 = xxh64_merge_round(h64, v1);
		h64 = xxh64_merge_round(h64, v2);
		h64 = xxh64_merge_round(h64, v3);
		h64 = xxh64_merge_round(h64, v4);

	} else {
		h64 = seed + PRIME64_5;
	}

	h64 += (uint64_t)len;

	while (p + 8 <= b_end) {
		const uint64_t k1 = xxh64_round(0, xxh_get_unaligned_le64(p));

		h64 ^= k1;
		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
		p += 8;
	}

	if (p + 4 <= b_end) {
		h64 ^= (uint64_t)(xxh_get_unaligned_le32(p)) * PRIME64_1;
		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
		p += 4;
	}

	while (p < b_end) {
		h64 ^= (*p) * PRIME64_5;
		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
		p++;
	}

	h64 ^= h64 >> 33;
	h64 *= PRIME64_2;
	h64 ^= h64 >> 29;
	h64 *= PRIME64_3;
	h64 ^= h64 >> 32;

	return h64;
}

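/*
 * Usage sketch (illustrative, not part of the original file): one-shot
 * hashing with the simple API. example_oneshot() is a hypothetical helper;
 * a seed of 0 produces the canonical xxHash checksums.
 */
static inline uint64_t example_oneshot(const void *buf, size_t len)
{
	/*
	 * Hash an entire in-memory buffer in a single call; no state is
	 * needed. xxh32() is the 32-bit analogue.
	 */
	return xxh64(buf, len, 0);
}
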
/*-**************************************************
 * Advanced Hash Functions
 ***************************************************/
void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
{
	/* use a local state for memcpy() to avoid strict-aliasing warnings */
	struct xxh32_state state;

	memset(&state, 0, sizeof(state));
	state.v1 = seed + PRIME32_1 + PRIME32_2;
	state.v2 = seed + PRIME32_2;
	state.v3 = seed + 0;
	state.v4 = seed - PRIME32_1;
	memcpy(statePtr, &state, sizeof(state));
}

void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
{
	/* use a local state for memcpy() to avoid strict-aliasing warnings */
	struct xxh64_state state;

	memset(&state, 0, sizeof(state));
	state.v1 = seed + PRIME64_1 + PRIME64_2;
	state.v2 = seed + PRIME64_2;
	state.v3 = seed + 0;
	state.v4 = seed - PRIME64_1;
	memcpy(statePtr, &state, sizeof(state));
}

int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -1;

	state->total_len_32 += (uint32_t)len;
	state->large_len |= (len >= 16) | (state->total_len_32 >= 16);

	if (state->memsize + len < 16) { /* fill in tmp buffer */
		memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) { /* some data left from previous update */
		const uint32_t *p32 = state->mem32;

		memcpy((uint8_t *)(state->mem32) + state->memsize, input,
			16 - state->memsize);

		state->v1 = xxh32_round(state->v1, xxh_get_unaligned_le32(p32));
		p32++;
		state->v2 = xxh32_round(state->v2, xxh_get_unaligned_le32(p32));
		p32++;
		state->v3 = xxh32_round(state->v3, xxh_get_unaligned_le32(p32));
		p32++;
		state->v4 = xxh32_round(state->v4, xxh_get_unaligned_le32(p32));
		p32++;

		p += 16 - state->memsize;
		state->memsize = 0;
	}

	if (p <= b_end - 16) {
		const uint8_t *const limit = b_end - 16;
		uint32_t v1 = state->v1;
		uint32_t v2 = state->v2;
		uint32_t v3 = state->v3;
		uint32_t v4 = state->v4;

		do {
			v1 = xxh32_round(v1, xxh_get_unaligned_le32(p));
			p += 4;
			v2 = xxh32_round(v2, xxh_get_unaligned_le32(p));
			p += 4;
			v3 = xxh32_round(v3, xxh_get_unaligned_le32(p));
			p += 4;
			v4 = xxh32_round(v4, xxh_get_unaligned_le32(p));
			p += 4;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) {
		memcpy(state->mem32, p, (size_t)(b_end-p));
		state->memsize = (uint32_t)(b_end-p);
	}

	return 0;
}

uint32_t xxh32_digest(const struct xxh32_state *state)
{
	const uint8_t *p = (const uint8_t *)state->mem32;
	const uint8_t *const b_end = (const uint8_t *)(state->mem32) +
		state->memsize;
	uint32_t h32;

	if (state->large_len) {
		h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +
			xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);
	} else {
		h32 = state->v3 /* == seed */ + PRIME32_5;
	}

	h32 += state->total_len_32;

	while (p + 4 <= b_end) {
		h32 += xxh_get_unaligned_le32(p) * PRIME32_3;
		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
		p += 4;
	}

	while (p < b_end) {
		h32 += (*p) * PRIME32_5;
		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
		p++;
	}

	h32 ^= h32 >> 15;
	h32 *= PRIME32_2;
	h32 ^= h32 >> 13;
	h32 *= PRIME32_3;
	h32 ^= h32 >> 16;

	return h32;
}

int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -1;

	state->total_len += len;

	if (state->memsize + len < 32) { /* fill in tmp buffer */
		memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) { /* tmp buffer is full */
		uint64_t *p64 = state->mem64;

		memcpy(((uint8_t *)p64) + state->memsize, input,
			32 - state->memsize);

		state->v1 = xxh64_round(state->v1, xxh_get_unaligned_le64(p64));
		p64++;
		state->v2 = xxh64_round(state->v2, xxh_get_unaligned_le64(p64));
		p64++;
		state->v3 = xxh64_round(state->v3, xxh_get_unaligned_le64(p64));
		p64++;
		state->v4 = xxh64_round(state->v4, xxh_get_unaligned_le64(p64));

		p += 32 - state->memsize;
		state->memsize = 0;
	}

	if (p + 32 <= b_end) {
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = state->v1;
		uint64_t v2 = state->v2;
		uint64_t v3 = state->v3;
		uint64_t v4 = state->v4;

		do {
			v1 = xxh64_round(v1, xxh_get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, xxh_get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, xxh_get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, xxh_get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) {
		memcpy(state->mem64, p, (size_t)(b_end-p));
		state->memsize = (uint32_t)(b_end - p);
	}

	return 0;
}

uint64_t xxh64_digest(const struct xxh64_state *state)
{
	const uint8_t *p = (const uint8_t *)state->mem64;
	const uint8_t *const b_end = (const uint8_t *)state->mem64 +
		state->memsize;
	uint64_t h64;

	if (state->total_len >= 32) {
		const uint64_t v1 = state->v1;
		const uint64_t v2 = state->v2;
		const uint64_t v3 = state->v3;
		const uint64_t v4 = state->v4;

		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
		h64 = xxh64_merge_round(h64, v1);
		h64 = xxh64_merge_round(h64, v2);
		h64 = xxh64_merge_round(h64, v3);
		h64 = xxh64_merge_round(h64, v4);
	} else {
		h64 = state->v3 + PRIME64_5;
	}

	h64 += (uint64_t)state->total_len;

	while (p + 8 <= b_end) {
		const uint64_t k1 = xxh64_round(0, xxh_get_unaligned_le64(p));

		h64 ^= k1;
		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
		p += 8;
	}

	if (p + 4 <= b_end) {
		h64 ^= (uint64_t)(xxh_get_unaligned_le32(p)) * PRIME64_1;
		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
		p += 4;
	}

	while (p < b_end) {
		h64 ^= (*p) * PRIME64_5;
		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
		p++;
	}

	h64 ^= h64 >> 33;
	h64 *= PRIME64_2;
	h64 ^= h64 >> 29;
	h64 *= PRIME64_3;
	h64 ^= h64 >> 32;

	return h64;
}

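/*
 * Usage sketch (illustrative, not part of the original file): streaming
 * hashing with the advanced API. example_streaming() is a hypothetical
 * helper; feeding the chunks through reset/update/digest yields the same
 * value as one xxh64() call over the concatenated buffer.
 */
static inline uint64_t example_streaming(const void *a, size_t a_len,
					 const void *b, size_t b_len)
{
	struct xxh64_state state;

	xxh64_reset(&state, 0);
	/* xxh64_update() returns -1 only for a NULL input; ignored here. */
	xxh64_update(&state, a, a_len);
	xxh64_update(&state, b, b_len);
	/* digest does not modify the state, so more data could still follow */
	return xxh64_digest(&state);
}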