/*
-------------------------------------------------------------------------------
lookup3.c, by Bob Jenkins, May 2006, Public Domain.

These are functions for producing 32-bit hashes for hash table lookup.
hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
are externally useful functions. Routines to test the hash are included
if SELF_TEST is defined. You can use this free for any purpose. It's in
the public domain. It has no warranty.

You probably want to use hashlittle(). hashlittle() and hashbig()
hash byte arrays. hashlittle() is faster than hashbig() on
little-endian machines. Intel and AMD are little-endian machines.
On second thought, you probably want hashlittle2(), which is identical to
hashlittle() except it returns two 32-bit hashes for the price of one.
You could implement hashbig2() if you wanted but I haven't bothered here.

If you want to find a hash of, say, exactly 7 integers, do
  a = i1; b = i2; c = i3;
  mix(a,b,c);
  a += i4; b += i5; c += i6;
  mix(a,b,c);
  a += i7;
  final(a,b,c);
then use c as the hash value. If you have a variable-length array of
4-byte integers to hash, use hashword(). If you have a byte array (like
a character string), use hashlittle(). If you have several byte arrays, or
a mix of things, see the comments above hashlittle().

Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
then mix those integers. This is fast (you can do a lot more thorough
mixing with 12*3 instructions on 3 integers than you can with 3 instructions
on 1 byte), but shoehorning those bytes into integers efficiently is messy.
-------------------------------------------------------------------------------
*/
#ifdef HAVE_CONFIG_H
#include <jansson_private_config.h>
#endif

#include <stdint.h>     /* defines uint32_t etc */

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>  /* attempt to define endianness */
#endif

#ifdef HAVE_ENDIAN_H
#include <endian.h>     /* attempt to define endianness */
#endif
/*
 * My best guess at if you are big-endian or little-endian. This may
 * need to be tweaked.
 */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
     __BYTE_ORDER == __LITTLE_ENDIAN) || \
    (defined(i386) || defined(__i386__) || defined(__i486__) || \
     defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
# define HASH_LITTLE_ENDIAN 1
# define HASH_BIG_ENDIAN 0
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
       __BYTE_ORDER == __BIG_ENDIAN) || \
      (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 0
#endif
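
/*
 * Not part of the original file: a minimal runtime cross-check of the
 * compile-time guess above, assuming that inspecting the low byte of a
 * known 32-bit value is an acceptable test. The name
 * endianness_guess_matches is an illustrative choice; the function also
 * returns 0 when the guess was "unknown" (both macros 0), not only on a
 * real mismatch.
 */
#ifdef SELF_TEST
static int endianness_guess_matches(void)
{
    const uint32_t one = 1;
    /* on a little-endian machine the low-order byte is stored first */
    int little = (*(const unsigned char *)&one == 1);
    return little ? HASH_LITTLE_ENDIAN : HASH_BIG_ENDIAN;
}
#endif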
#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
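
/*
 * Not part of the original file: a tiny sanity sketch for rot(), which
 * is a 32-bit left rotate; the top k bits wrap around to the bottom,
 * e.g. rot(0x80000001, 1) == 0x00000003. Note that k must stay in
 * 1..31: for k == 0 or k == 32 the (x)>>(32-(k)) half would shift by
 * 32, which is undefined behavior in C.
 */
#ifdef SELF_TEST
static int rot_sanity(void)
{
    /* the high bit of 0x80000001 wraps around next to the low bit */
    return rot((uint32_t)0x80000001, 1) == (uint32_t)0x00000003;
}
#endif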
/*
-------------------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.

This is reversible, so any information in (a,b,c) before mix() is
still in (a,b,c) after mix().

If four pairs of (a,b,c) inputs are run through mix(), or through
mix() in reverse, there are at least 32 bits of the output that
are sometimes the same for one pair and different for another pair.
This was tested for:
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
satisfy this are
    4  6  8 16 19  4
    9 15  3 18 27 15
   14  9  3  7 17  3
Well, "9 15 3 18 27 15" didn't quite get 32 bits differing
for "differ" defined as + with a one-bit base and a two-bit delta. I
used http://burtleburtle.net/bob/hash/avalanche.html to choose
the operations, constants, and arrangements of the variables.

This does not achieve avalanche. There are input bits of (a,b,c)
that fail to affect some output bits of (a,b,c), especially of a. The
most thoroughly mixed value is c, but it doesn't really even achieve
avalanche in c.

This allows some parallelism. Read-after-writes are good at doubling
the number of bits affected, so the goal of mixing pulls in the opposite
direction as the goal of parallelism. I did what I could. Rotates
seem to cost as much as shifts on every machine I could lay my hands
on, and rotates are much kinder to the top and bottom bits, so I used
rotates.
-------------------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
  a -= c;  a ^= rot(c, 4);  c += b; \
  b -= a;  b ^= rot(a, 6);  a += c; \
  c -= b;  c ^= rot(b, 8);  b += a; \
  a -= c;  a ^= rot(c,16);  c += b; \
  b -= a;  b ^= rot(a,19);  a += c; \
  c -= b;  c ^= rot(b, 4);  b += a; \
}
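
/*
 * Not part of the original file: a sketch making the reversibility
 * claim above concrete. unmix() is an illustrative inverse of mix():
 * it applies the inverse of each of the six steps in reverse order
 * (xor is its own inverse, and each -=/+= pair cancels), so running
 * mix() and then unmix() on the same variables restores the original
 * (a,b,c) exactly.
 */
#define unmix(a,b,c) \
{ \
  b -= a;  c ^= rot(b, 4);  c += b; \
  a -= c;  b ^= rot(a,19);  b += a; \
  c -= b;  a ^= rot(c,16);  a += c; \
  b -= a;  c ^= rot(b, 8);  c += b; \
  a -= c;  b ^= rot(a, 6);  b += a; \
  c -= b;  a ^= rot(c, 4);  a += c; \
}

#ifdef SELF_TEST
static int mix_is_reversible(void)
{
    uint32_t a = 0x12345678, b = 0x9abcdef0, c = 0x0f1e2d3c; /* arbitrary */
    mix(a,b,c);
    unmix(a,b,c);
    /* no information was lost: the starting state comes back */
    return a == 0x12345678 && b == 0x9abcdef0 && c == 0x0f1e2d3c;
}
#endif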
/*
-------------------------------------------------------------------------------
final -- final mixing of 3 32-bit values (a,b,c) into c

Pairs of (a,b,c) values differing in only a few bits will usually
produce values of c that look totally different. This was tested for
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

These constants passed:
 14 11 25 16 4 14 24
 12 14 25 16 4 14 24
and these came close:
  4  8 15 26 3 22 24
 10  8 15 26 3 22 24
 11  8 15 26 3 22 24
-------------------------------------------------------------------------------
*/
#define final(a,b,c) \
{ \
  c ^= b; c -= rot(b,14); \
  a ^= c; a -= rot(c,11); \
  b ^= a; b -= rot(a,25); \
  c ^= b; c -= rot(b,16); \
  a ^= c; a -= rot(c,4);  \
  b ^= a; b -= rot(a,14); \
  c ^= b; c -= rot(b,24); \
}
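
/*
 * Not part of the original file: the "exactly 7 integers" recipe from
 * the header comment written out as compilable code, to show how mix()
 * and final() combine. The helper name hash7words is an illustrative
 * assumption.
 */
#ifdef SELF_TEST
static uint32_t hash7words(uint32_t i1, uint32_t i2, uint32_t i3,
                           uint32_t i4, uint32_t i5, uint32_t i6,
                           uint32_t i7)
{
    uint32_t a = i1, b = i2, c = i3;
    mix(a,b,c);
    a += i4; b += i5; c += i6;
    mix(a,b,c);
    a += i7;
    final(a,b,c);
    return c;                               /* use c as the hash value */
}
#endif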
/*
-------------------------------------------------------------------------------
hashlittle() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  length  : the length of the key, counting by bytes
  initval : can be any 4-byte value
Returns a 32-bit value. Every bit of the key affects every bit of
the return value. Two keys differing by one or two bits will have
totally different hash values.

The best hash table sizes are powers of 2. There is no need to do
mod a prime (mod is sooo slow!). If you need less than 32 bits,
use a bitmask. For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.

If you are hashing n strings (uint8_t **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);

By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
code any way you wish, private, educational, or commercial. It's free.

Use for hash table lookup, or anything where one collision in 2^^32 is
acceptable. Do NOT use for cryptographic purposes.
-------------------------------------------------------------------------------
*/
static uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
{
  uint32_t a,b,c;                                          /* internal state */
  union { const void *ptr; size_t i; } u;     /* needed for Mac Powerbook G4 */

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

  u.ptr = key;
  if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
    const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */
    /* Detect Valgrind or AddressSanitizer */
#ifdef VALGRIND
# define NO_MASKING_TRICK 1
#else
# if defined(__has_feature)  /* Clang */
#  if __has_feature(address_sanitizer)  /* is ASAN enabled? */
#   define NO_MASKING_TRICK 1
#  endif
# else
#  if defined(__SANITIZE_ADDRESS__)  /* GCC 4.8.x, is ASAN enabled? */
#   define NO_MASKING_TRICK 1
#  endif
# endif
#endif

#ifdef NO_MASKING_TRICK
    const uint8_t *k8;
#endif
    /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
    while (length > 12) {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 12;
      k += 3;
    }
    /*----------------------------- handle the last (probably partial) block */
    /*
     * "k[2]&0xffffff" actually reads beyond the end of the string, but
     * then masks off the part it's not allowed to read. Because the
     * string is aligned, the masked-off tail is in the same word as the
     * rest of the string. Every machine with memory protection I've seen
     * does it on word boundaries, so is OK with this. But VALGRIND will
     * still catch it and complain. The masking trick does make the hash
     * noticeably faster for short strings (like English words).
     */
#ifndef NO_MASKING_TRICK

    switch (length) {
    case 12: c += k[2]; b += k[1]; a += k[0]; break;
    case 11: c += k[2] & 0xffffff; b += k[1]; a += k[0]; break;
    case 10: c += k[2] & 0xffff; b += k[1]; a += k[0]; break;
    case 9:  c += k[2] & 0xff; b += k[1]; a += k[0]; break;
    case 8:  b += k[1]; a += k[0]; break;
    case 7:  b += k[1] & 0xffffff; a += k[0]; break;
    case 6:  b += k[1] & 0xffff; a += k[0]; break;
    case 5:  b += k[1] & 0xff; a += k[0]; break;
    case 4:  a += k[0]; break;
    case 3:  a += k[0] & 0xffffff; break;
    case 2:  a += k[0] & 0xffff; break;
    case 1:  a += k[0] & 0xff; break;
    case 0:  return c;              /* zero length strings require no mixing */
    }

#else /* make valgrind happy */
    k8 = (const uint8_t *)k;
    switch (length) {
    case 12: c += k[2]; b += k[1]; a += k[0]; break;
    case 11: c += ((uint32_t)k8[10]) << 16;   /* fall through */
    case 10: c += ((uint32_t)k8[9]) << 8;     /* fall through */
    case 9:  c += k8[8];                      /* fall through */
    case 8:  b += k[1]; a += k[0]; break;
    case 7:  b += ((uint32_t)k8[6]) << 16;    /* fall through */
    case 6:  b += ((uint32_t)k8[5]) << 8;     /* fall through */
    case 5:  b += k8[4];                      /* fall through */
    case 4:  a += k[0]; break;
    case 3:  a += ((uint32_t)k8[2]) << 16;    /* fall through */
    case 2:  a += ((uint32_t)k8[1]) << 8;     /* fall through */
    case 1:  a += k8[0]; break;
    case 0:  return c;
    }

#endif /* !valgrind */
  } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
    const uint16_t *k = (const uint16_t *)key;         /* read 16-bit chunks */
    const uint8_t *k8;
    /*--------------- all but last block: aligned reads and different mixing */
    while (length > 12) {
      a += k[0] + (((uint32_t)k[1]) << 16);
      b += k[2] + (((uint32_t)k[3]) << 16);
      c += k[4] + (((uint32_t)k[5]) << 16);
      mix(a,b,c);
      length -= 12;
      k += 6;
    }
    /*----------------------------- handle the last (probably partial) block */
    k8 = (const uint8_t *)k;
    switch (length) {
    case 12:
      c += k[4] + (((uint32_t)k[5]) << 16);
      b += k[2] + (((uint32_t)k[3]) << 16);
      a += k[0] + (((uint32_t)k[1]) << 16);
      break;
    case 11:
      c += ((uint32_t)k8[10]) << 16;          /* fall through */
    case 10:
      c += k[4];
      b += k[2] + (((uint32_t)k[3]) << 16);
      a += k[0] + (((uint32_t)k[1]) << 16);
      break;
    case 9:
      c += k8[8];                             /* fall through */
    case 8:
      b += k[2] + (((uint32_t)k[3]) << 16);
      a += k[0] + (((uint32_t)k[1]) << 16);
      break;
    case 7:
      b += ((uint32_t)k8[6]) << 16;           /* fall through */
    case 6:
      b += k[2];
      a += k[0] + (((uint32_t)k[1]) << 16);
      break;
    case 5:
      b += k8[4];                             /* fall through */
    case 4:
      a += k[0] + (((uint32_t)k[1]) << 16);
      break;
    case 3:
      a += ((uint32_t)k8[2]) << 16;           /* fall through */
    case 2:
      a += k[0];
      break;
    case 1:
      a += k8[0];
      break;
    case 0:
      return c;                            /* zero length requires no mixing */
    }
  } else {                        /* need to read the key one byte at a time */
    const uint8_t *k = (const uint8_t *)key;
    /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
    while (length > 12) {
      a += k[0];
      a += ((uint32_t)k[1]) << 8;
      a += ((uint32_t)k[2]) << 16;
      a += ((uint32_t)k[3]) << 24;
      b += k[4];
      b += ((uint32_t)k[5]) << 8;
      b += ((uint32_t)k[6]) << 16;
      b += ((uint32_t)k[7]) << 24;
      c += k[8];
      c += ((uint32_t)k[9]) << 8;
      c += ((uint32_t)k[10]) << 16;
      c += ((uint32_t)k[11]) << 24;
      mix(a,b,c);
      length -= 12;
      k += 12;
    }
    /*-------------------------------- last block: affect all 32 bits of (c) */
    switch (length) {              /* all the case statements fall through */
    case 12: c += ((uint32_t)k[11]) << 24;    /* fall through */
    case 11: c += ((uint32_t)k[10]) << 16;    /* fall through */
    case 10: c += ((uint32_t)k[9]) << 8;      /* fall through */
    case 9:  c += k[8];                       /* fall through */
    case 8:  b += ((uint32_t)k[7]) << 24;     /* fall through */
    case 7:  b += ((uint32_t)k[6]) << 16;     /* fall through */
    case 6:  b += ((uint32_t)k[5]) << 8;      /* fall through */
    case 5:  b += k[4];                       /* fall through */
    case 4:  a += ((uint32_t)k[3]) << 24;     /* fall through */
    case 3:  a += ((uint32_t)k[2]) << 16;     /* fall through */
    case 2:  a += ((uint32_t)k[1]) << 8;      /* fall through */
    case 1:  a += k[0];
             break;
    case 0:  return c;
    }
  }

  final(a,b,c);
  return c;
}