//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
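//
// A hedged usage sketch (illustrative only, not part of this file): the same
// key hashed with the x86 and x64 variants gives unrelated digests, so pick
// one variant and stick with it. The key, seed, and buffer names below are
// made up for the example.
//
//   const char key[] = "example key";
//   uint32_t h32;
//   uint64_t h128[2];
//   MurmurHash3_x86_32 (key, (int)(sizeof(key) - 1), 42, &h32);   // 32-bit digest
//   MurmurHash3_x64_128(key, (int)(sizeof(key) - 1), 42, h128);   // 128-bit digest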
#include "MurmurHash3.h"
//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE    __forceinline

#include <stdlib.h>  // for _rotl / _rotl64

#define ROTL32(x,y)     _rotl(x,y)
#define ROTL64(x,y)     _rotl64(x,y)
#define BIG_CONSTANT(x) (x)
// Other compilers

#else // defined(_MSC_VER)

#define FORCE_INLINE inline __attribute__((always_inline))

inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y)     rotl32(x,y)
#define ROTL64(x,y)     rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
{
  return p[i];
}

FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
{
  return p[i];
}
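
// As a hedged sketch of the note above (not part of the reference code): a
// big-endian build could byte-swap inside getblock instead. This assumes a
// GCC/Clang toolchain where the __builtin_bswap32/64 built-ins exist.
//
//   FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
//   {
//     return __builtin_bswap32(p[i]);  // convert little-endian block to host order
//   }
//
//   FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
//   {
//     return __builtin_bswap64(p[i]);
//   }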
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE uint32_t fmix ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;
  return h;
}

FORCE_INLINE uint64_t fmix ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;
  return k;
}
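
// A hedged illustration of "avalanche" (not part of the reference code):
// flipping a single input bit should flip roughly half of the output bits.
// __builtin_popcount is assumed available (GCC/Clang built-in).
//
//   int avalanche_demo ( uint32_t x )
//   {
//     // For a good finalizer this hovers around 16 of 32 bits.
//     return __builtin_popcount(fmix(x) ^ fmix(x ^ 1u));
//   }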
//-----------------------------------------------------------------------------

void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  // blocks points just past the last full block, so i runs from -nblocks to -1
  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  case 2: k1 ^= tail[1] << 8;
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix(h1);

  *(uint32_t*)out = h1;
}
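
// A small worked example of the tail handling above (illustrative only):
// for len = 7, nblocks = 1, so the loop mixes one full 4-byte block, and
// len & 3 == 3, so the switch falls through case 3 -> 2 -> 1, folding
// tail[2], tail[1], tail[0] into k1 before the final mix.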
//-----------------------------------------------------------------------------
void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  uint32_t c1 = 0x239b961b;
  uint32_t c2 = 0xab0e9789;
  uint32_t c3 = 0x38b34ae5;
  uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i*4+0);
    uint32_t k2 = getblock(blocks,i*4+1);
    uint32_t k3 = getblock(blocks,i*4+2);
    uint32_t k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16;
  case 14: k4 ^= tail[13] << 8;
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

  case 12: k3 ^= tail[11] << 24;
  case 11: k3 ^= tail[10] << 16;
  case 10: k3 ^= tail[ 9] << 8;
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

  case  8: k2 ^= tail[ 7] << 24;
  case  7: k2 ^= tail[ 6] << 16;
  case  6: k2 ^= tail[ 5] << 8;
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

  case  4: k1 ^= tail[ 3] << 24;
  case  3: k1 ^= tail[ 2] << 16;
  case  2: k1 ^= tail[ 1] << 8;
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);
  h3 = fmix(h3);
  h4 = fmix(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}
//-----------------------------------------------------------------------------
void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)(data);

  for(int i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15)
  {
  case 15: k2 ^= uint64_t(tail[14]) << 48;
  case 14: k2 ^= uint64_t(tail[13]) << 40;
  case 13: k2 ^= uint64_t(tail[12]) << 32;
  case 12: k2 ^= uint64_t(tail[11]) << 24;
  case 11: k2 ^= uint64_t(tail[10]) << 16;
  case 10: k2 ^= uint64_t(tail[ 9]) << 8;
  case  9: k2 ^= uint64_t(tail[ 8]) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

  case  8: k1 ^= uint64_t(tail[ 7]) << 56;
  case  7: k1 ^= uint64_t(tail[ 6]) << 48;
  case  6: k1 ^= uint64_t(tail[ 5]) << 40;
  case  5: k1 ^= uint64_t(tail[ 4]) << 32;
  case  4: k1 ^= uint64_t(tail[ 3]) << 24;
  case  3: k1 ^= uint64_t(tail[ 2]) << 16;
  case  2: k1 ^= uint64_t(tail[ 1]) << 8;
  case  1: k1 ^= uint64_t(tail[ 0]) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}
//-----------------------------------------------------------------------------