1 //===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the newly proposed standard C++ interfaces for hashing
10 // arbitrary data and building hash functions for user-defined types. This
11 // interface was originally proposed in N3333[1] and is currently under review
12 // for inclusion in a future TR and/or standard.
// The primary interfaces provided are comprised of one type and three functions:
16 // -- 'hash_code' class is an opaque type representing the hash code for some
17 // data. It is the intended product of hashing, and can be used to implement
18 // hash tables, checksumming, and other common uses of hashes. It is not an
19 // integer type (although it can be converted to one) because it is risky
20 // to assume much about the internals of a hash_code. In particular, each
21 // execution of the program has a high probability of producing a different
22 // hash_code for a given input. Thus their values are not stable to save or
23 // persist, and should only be used during the execution for the
24 // construction of hashing datastructures.
26 // -- 'hash_value' is a function designed to be overloaded for each
27 // user-defined type which wishes to be used within a hashing context. It
28 // should be overloaded within the user-defined type's namespace and found
29 // via ADL. Overloads for primitive types are provided by this library.
31 // -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
32 // programmers in easily and intuitively combining a set of data into
33 // a single hash_code for their object. They should only logically be used
34 // within the implementation of a 'hash_value' routine or similar context.
36 // Note that 'hash_combine_range' contains very special logic for hashing
37 // a contiguous array of integers or pointers. This logic is *extremely* fast,
38 // on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, these were
// benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys
// under 32-bytes.
42 //===----------------------------------------------------------------------===//
44 #ifndef LLVM_ADT_HASHING_H
45 #define LLVM_ADT_HASHING_H
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <string>
#include <utility>
/// An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
/// While it wraps and exposes a numeric value, this value should not be
/// trusted to be stable or predictable across processes or executions.
///
/// In order to obtain the hash_code for an object 'x':
/// \code
///   using llvm::hash_value;
///   llvm::hash_code code = hash_value(x);
/// \endcode
class hash_code {
  size_t value;

public:
  /// Default construct a hash_code.
  /// Note that this leaves the value uninitialized.
  hash_code() = default;

  /// Form a hash code directly from a numerical value.
  hash_code(size_t value) : value(value) {}

  /// Convert the hash code to its numerical value for use.
  /*explicit*/ operator size_t() const { return value; }

  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value == rhs.value;
  }
  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value != rhs.value;
  }

  /// Allow a hash_code to be directly run through hash_value.
  friend size_t hash_value(const hash_code &code) { return code.value; }
};
96 /// Compute a hash_code for any integer value.
98 /// Note that this function is intended to compute the same hash_code for
99 /// a particular value without regard to the pre-promotion type. This is in
100 /// contrast to hash_combine which may produce different hash_codes for
101 /// differing argument types even if they would implicit promote to a common
102 /// type without changing the value.
103 template <typename T
>
104 typename
std::enable_if
<is_integral_or_enum
<T
>::value
, hash_code
>::type
107 /// Compute a hash_code for a pointer's address.
109 /// N.B.: This hashes the *address*. Not the value and not the type.
110 template <typename T
> hash_code
hash_value(const T
*ptr
);
112 /// Compute a hash_code for a pair of objects.
113 template <typename T
, typename U
>
114 hash_code
hash_value(const std::pair
<T
, U
> &arg
);
116 /// Compute a hash_code for a standard string.
117 template <typename T
>
118 hash_code
hash_value(const std::basic_string
<T
> &arg
);
/// Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
/// attackable and to ensure that output which is intended to be stable does
/// not rely on the particulars of the hash codes produced.
///
/// That said, there are use cases where it is important to be able to
/// reproduce *exactly* a specific behavior. To that end, we provide a function
/// which will forcibly set the seed to a fixed value. This must be done at the
/// start of the program, before any hashes are computed. Also, it cannot be
/// undone. This makes it thread-hostile and very hard to use outside of
/// immediately on start of a simple program designed for reproducible
/// behavior.
void set_fixed_execution_hash_seed(uint64_t fixed_value);
138 // All of the implementation details of actually computing the various hash
139 // code values are held within this namespace. These routines are included in
140 // the header file mainly to allow inlining and constant propagation.
144 inline uint64_t fetch64(const char *p
) {
146 memcpy(&result
, p
, sizeof(result
));
147 if (sys::IsBigEndianHost
)
148 sys::swapByteOrder(result
);
152 inline uint32_t fetch32(const char *p
) {
154 memcpy(&result
, p
, sizeof(result
));
155 if (sys::IsBigEndianHost
)
156 sys::swapByteOrder(result
);
/// Some primes between 2^63 and 2^64 for various uses.
/// These are the multiplicative constants used by the CityHash-derived
/// routines below.
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint64_t k3 = 0xc949d7c7509e6557ULL;
/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}
/// XOR a value with its top 17 bits shifted down; a cheap avalanche step
/// used by the Murmur/CityHash-style routines below.
inline uint64_t shift_mix(uint64_t val) {
  return val ^ (val >> 47);
}
/// Combine two 64-bit values into one using a Murmur-inspired mix.
inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (low ^ high) * kMul;
  a ^= (a >> 47);
  uint64_t b = (high ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}
189 inline uint64_t hash_1to3_bytes(const char *s
, size_t len
, uint64_t seed
) {
191 uint8_t b
= s
[len
>> 1];
192 uint8_t c
= s
[len
- 1];
193 uint32_t y
= static_cast<uint32_t>(a
) + (static_cast<uint32_t>(b
) << 8);
194 uint32_t z
= static_cast<uint32_t>(len
) + (static_cast<uint32_t>(c
) << 2);
195 return shift_mix(y
* k2
^ z
* k3
^ seed
) * k2
;
198 inline uint64_t hash_4to8_bytes(const char *s
, size_t len
, uint64_t seed
) {
199 uint64_t a
= fetch32(s
);
200 return hash_16_bytes(len
+ (a
<< 3), seed
^ fetch32(s
+ len
- 4));
203 inline uint64_t hash_9to16_bytes(const char *s
, size_t len
, uint64_t seed
) {
204 uint64_t a
= fetch64(s
);
205 uint64_t b
= fetch64(s
+ len
- 8);
206 return hash_16_bytes(seed
^ a
, rotate(b
+ len
, len
)) ^ b
;
209 inline uint64_t hash_17to32_bytes(const char *s
, size_t len
, uint64_t seed
) {
210 uint64_t a
= fetch64(s
) * k1
;
211 uint64_t b
= fetch64(s
+ 8);
212 uint64_t c
= fetch64(s
+ len
- 8) * k2
;
213 uint64_t d
= fetch64(s
+ len
- 16) * k0
;
214 return hash_16_bytes(rotate(a
- b
, 43) + rotate(c
^ seed
, 30) + d
,
215 a
+ rotate(b
^ k3
, 20) - c
+ len
+ seed
);
218 inline uint64_t hash_33to64_bytes(const char *s
, size_t len
, uint64_t seed
) {
219 uint64_t z
= fetch64(s
+ 24);
220 uint64_t a
= fetch64(s
) + (len
+ fetch64(s
+ len
- 16)) * k0
;
221 uint64_t b
= rotate(a
+ z
, 52);
222 uint64_t c
= rotate(a
, 37);
225 a
+= fetch64(s
+ 16);
227 uint64_t vs
= b
+ rotate(a
, 31) + c
;
228 a
= fetch64(s
+ 16) + fetch64(s
+ len
- 32);
229 z
= fetch64(s
+ len
- 8);
230 b
= rotate(a
+ z
, 52);
232 a
+= fetch64(s
+ len
- 24);
234 a
+= fetch64(s
+ len
- 16);
236 uint64_t ws
= b
+ rotate(a
, 31) + c
;
237 uint64_t r
= shift_mix((vf
+ ws
) * k2
+ (wf
+ vs
) * k0
);
238 return shift_mix((seed
^ (r
* k0
)) + vs
) * k2
;
241 inline uint64_t hash_short(const char *s
, size_t length
, uint64_t seed
) {
242 if (length
>= 4 && length
<= 8)
243 return hash_4to8_bytes(s
, length
, seed
);
244 if (length
> 8 && length
<= 16)
245 return hash_9to16_bytes(s
, length
, seed
);
246 if (length
> 16 && length
<= 32)
247 return hash_17to32_bytes(s
, length
, seed
);
249 return hash_33to64_bytes(s
, length
, seed
);
251 return hash_1to3_bytes(s
, length
, seed
);
256 /// The intermediate state used during hashing.
257 /// Currently, the algorithm for computing hash codes is based on CityHash and
258 /// keeps 56 bytes of arbitrary state.
260 uint64_t h0
, h1
, h2
, h3
, h4
, h5
, h6
;
262 /// Create a new hash_state structure and initialize it based on the
263 /// seed and the first 64-byte chunk.
264 /// This effectively performs the initial mix.
265 static hash_state
create(const char *s
, uint64_t seed
) {
267 0, seed
, hash_16_bytes(seed
, k1
), rotate(seed
^ k1
, 49),
268 seed
* k1
, shift_mix(seed
), 0 };
269 state
.h6
= hash_16_bytes(state
.h4
, state
.h5
);
274 /// Mix 32-bytes from the input sequence into the 16-bytes of 'a'
275 /// and 'b', including whatever is already in 'a' and 'b'.
276 static void mix_32_bytes(const char *s
, uint64_t &a
, uint64_t &b
) {
278 uint64_t c
= fetch64(s
+ 24);
279 b
= rotate(b
+ a
+ c
, 21);
281 a
+= fetch64(s
+ 8) + fetch64(s
+ 16);
282 b
+= rotate(a
, 44) + d
;
286 /// Mix in a 64-byte buffer of data.
287 /// We mix all 64 bytes even when the chunk length is smaller, but we
288 /// record the actual length.
289 void mix(const char *s
) {
290 h0
= rotate(h0
+ h1
+ h3
+ fetch64(s
+ 8), 37) * k1
;
291 h1
= rotate(h1
+ h4
+ fetch64(s
+ 48), 42) * k1
;
293 h1
+= h3
+ fetch64(s
+ 40);
294 h2
= rotate(h2
+ h5
, 33) * k1
;
297 mix_32_bytes(s
, h3
, h4
);
299 h6
= h1
+ fetch64(s
+ 16);
300 mix_32_bytes(s
+ 32, h5
, h6
);
304 /// Compute the final 64-bit hash code value based on the current
305 /// state and the length of bytes hashed.
306 uint64_t finalize(size_t length
) {
307 return hash_16_bytes(hash_16_bytes(h3
, h5
) + shift_mix(h1
) * k1
+ h2
,
308 hash_16_bytes(h4
, h6
) + shift_mix(length
) * k1
+ h0
);
/// A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable.
extern uint64_t fixed_seed_override;
320 inline uint64_t get_execution_seed() {
321 // FIXME: This needs to be a per-execution seed. This is just a placeholder
322 // implementation. Switching to a per-execution seed is likely to flush out
323 // instability bugs and so will happen as its own commit.
325 // However, if there is a fixed seed override set the first time this is
326 // called, return that instead of the per-execution seed.
327 const uint64_t seed_prime
= 0xff51afd7ed558ccdULL
;
328 static uint64_t seed
= fixed_seed_override
? fixed_seed_override
: seed_prime
;
333 /// Trait to indicate whether a type's bits can be hashed directly.
335 /// A type trait which is true if we want to combine values for hashing by
336 /// reading the underlying data. It is false if values of this type must
337 /// first be passed to hash_value, and the resulting hash_codes combined.
339 // FIXME: We want to replace is_integral_or_enum and is_pointer here with
340 // a predicate which asserts that comparing the underlying storage of two
341 // values of the type for equality is equivalent to comparing the two values
342 // for equality. For all the platforms we care about, this holds for integers
343 // and pointers, but there are platforms where it doesn't and we would like to
344 // support user-defined types which happen to satisfy this property.
345 template <typename T
> struct is_hashable_data
346 : std::integral_constant
<bool, ((is_integral_or_enum
<T
>::value
||
347 std::is_pointer
<T
>::value
) &&
348 64 % sizeof(T
) == 0)> {};
350 // Special case std::pair to detect when both types are viable and when there
351 // is no alignment-derived padding in the pair. This is a bit of a lie because
352 // std::pair isn't truly POD, but it's close enough in all reasonable
353 // implementations for our use case of hashing the underlying data.
354 template <typename T
, typename U
> struct is_hashable_data
<std::pair
<T
, U
> >
355 : std::integral_constant
<bool, (is_hashable_data
<T
>::value
&&
356 is_hashable_data
<U
>::value
&&
357 (sizeof(T
) + sizeof(U
)) ==
358 sizeof(std::pair
<T
, U
>))> {};
360 /// Helper to get the hashable data representation for a type.
361 /// This variant is enabled when the type itself can be used.
362 template <typename T
>
363 typename
std::enable_if
<is_hashable_data
<T
>::value
, T
>::type
364 get_hashable_data(const T
&value
) {
367 /// Helper to get the hashable data representation for a type.
368 /// This variant is enabled when we must first call hash_value and use the
369 /// result as our data.
370 template <typename T
>
371 typename
std::enable_if
<!is_hashable_data
<T
>::value
, size_t>::type
372 get_hashable_data(const T
&value
) {
373 using ::llvm::hash_value
;
374 return hash_value(value
);
/// Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
/// buffer, and if not immediately returns false. If there is space, it
/// copies the underlying bytes of value into the buffer, advances the
/// buffer_ptr past the copied bytes, and returns true.
///
/// \param offset Number of leading bytes of \p value to skip (used when a
///        previous call already stored a prefix of the value).
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;
  const char *value_data = reinterpret_cast<const char *>(&value);
  memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}
396 /// Implement the combining of integral values into a hash_code.
398 /// This overload is selected when the value type of the iterator is
399 /// integral. Rather than computing a hash_code for each object and then
400 /// combining them, this (as an optimization) directly combines the integers.
401 template <typename InputIteratorT
>
402 hash_code
hash_combine_range_impl(InputIteratorT first
, InputIteratorT last
) {
403 const uint64_t seed
= get_execution_seed();
404 char buffer
[64], *buffer_ptr
= buffer
;
405 char *const buffer_end
= std::end(buffer
);
406 while (first
!= last
&& store_and_advance(buffer_ptr
, buffer_end
,
407 get_hashable_data(*first
)))
410 return hash_short(buffer
, buffer_ptr
- buffer
, seed
);
411 assert(buffer_ptr
== buffer_end
);
413 hash_state state
= state
.create(buffer
, seed
);
415 while (first
!= last
) {
416 // Fill up the buffer. We don't clear it, which re-mixes the last round
417 // when only a partial 64-byte chunk is left.
419 while (first
!= last
&& store_and_advance(buffer_ptr
, buffer_end
,
420 get_hashable_data(*first
)))
423 // Rotate the buffer if we did a partial fill in order to simulate doing
424 // a mix of the last 64-bytes. That is how the algorithm works when we
425 // have a contiguous byte sequence, and we want to emulate that here.
426 std::rotate(buffer
, buffer_ptr
, buffer_end
);
428 // Mix this chunk into the current state.
430 length
+= buffer_ptr
- buffer
;
433 return state
.finalize(length
);
436 /// Implement the combining of integral values into a hash_code.
438 /// This overload is selected when the value type of the iterator is integral
439 /// and when the input iterator is actually a pointer. Rather than computing
440 /// a hash_code for each object and then combining them, this (as an
441 /// optimization) directly combines the integers. Also, because the integers
442 /// are stored in contiguous memory, this routine avoids copying each value
443 /// and directly reads from the underlying memory.
444 template <typename ValueT
>
445 typename
std::enable_if
<is_hashable_data
<ValueT
>::value
, hash_code
>::type
446 hash_combine_range_impl(ValueT
*first
, ValueT
*last
) {
447 const uint64_t seed
= get_execution_seed();
448 const char *s_begin
= reinterpret_cast<const char *>(first
);
449 const char *s_end
= reinterpret_cast<const char *>(last
);
450 const size_t length
= std::distance(s_begin
, s_end
);
452 return hash_short(s_begin
, length
, seed
);
454 const char *s_aligned_end
= s_begin
+ (length
& ~63);
455 hash_state state
= state
.create(s_begin
, seed
);
457 while (s_begin
!= s_aligned_end
) {
462 state
.mix(s_end
- 64);
464 return state
.finalize(length
);
467 } // namespace detail
468 } // namespace hashing
471 /// Compute a hash_code for a sequence of values.
473 /// This hashes a sequence of values. It produces the same hash_code as
474 /// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
475 /// and is significantly faster given pointers and types which can be hashed as
476 /// a sequence of bytes.
477 template <typename InputIteratorT
>
478 hash_code
hash_combine_range(InputIteratorT first
, InputIteratorT last
) {
479 return ::llvm::hashing::detail::hash_combine_range_impl(first
, last
);
483 // Implementation details for hash_combine.
487 /// Helper class to manage the recursive combining of hash_combine
490 /// This class exists to manage the state and various calls involved in the
491 /// recursive combining of arguments used in hash_combine. It is particularly
492 /// useful at minimizing the code in the recursive calls to ease the pain
493 /// caused by a lack of variadic functions.
494 struct hash_combine_recursive_helper
{
500 /// Construct a recursive hash combining helper.
502 /// This sets up the state for a recursive hash combine, including getting
503 /// the seed and buffer setup.
504 hash_combine_recursive_helper()
505 : seed(get_execution_seed()) {}
507 /// Combine one chunk of data into the current in-flight hash.
509 /// This merges one chunk of data into the hash. First it tries to buffer
510 /// the data. If the buffer is full, it hashes the buffer into its
511 /// hash_state, empties it, and then merges the new chunk in. This also
512 /// handles cases where the data straddles the end of the buffer.
513 template <typename T
>
514 char *combine_data(size_t &length
, char *buffer_ptr
, char *buffer_end
, T data
) {
515 if (!store_and_advance(buffer_ptr
, buffer_end
, data
)) {
516 // Check for skew which prevents the buffer from being packed, and do
517 // a partial store into the buffer to fill it. This is only a concern
518 // with the variadic combine because that formation can have varying
520 size_t partial_store_size
= buffer_end
- buffer_ptr
;
521 memcpy(buffer_ptr
, &data
, partial_store_size
);
523 // If the store fails, our buffer is full and ready to hash. We have to
524 // either initialize the hash state (on the first full buffer) or mix
525 // this buffer into the existing hash state. Length tracks the *hashed*
526 // length, not the buffered length.
528 state
= state
.create(buffer
, seed
);
531 // Mix this chunk into the current state and bump length up by 64.
535 // Reset the buffer_ptr to the head of the buffer for the next chunk of
539 // Try again to store into the buffer -- this cannot fail as we only
540 // store types smaller than the buffer.
541 if (!store_and_advance(buffer_ptr
, buffer_end
, data
,
548 /// Recursive, variadic combining method.
550 /// This function recurses through each argument, combining that argument
551 /// into a single hash.
552 template <typename T
, typename
...Ts
>
553 hash_code
combine(size_t length
, char *buffer_ptr
, char *buffer_end
,
554 const T
&arg
, const Ts
&...args
) {
555 buffer_ptr
= combine_data(length
, buffer_ptr
, buffer_end
, get_hashable_data(arg
));
557 // Recurse to the next argument.
558 return combine(length
, buffer_ptr
, buffer_end
, args
...);
561 /// Base case for recursive, variadic combining.
563 /// The base case when combining arguments recursively is reached when all
564 /// arguments have been handled. It flushes the remaining buffer and
565 /// constructs a hash_code.
566 hash_code
combine(size_t length
, char *buffer_ptr
, char *buffer_end
) {
567 // Check whether the entire set of values fit in the buffer. If so, we'll
568 // use the optimized short hashing routine and skip state entirely.
570 return hash_short(buffer
, buffer_ptr
- buffer
, seed
);
572 // Mix the final buffer, rotating it if we did a partial fill in order to
573 // simulate doing a mix of the last 64-bytes. That is how the algorithm
574 // works when we have a contiguous byte sequence, and we want to emulate
576 std::rotate(buffer
, buffer_ptr
, buffer_end
);
578 // Mix this chunk into the current state.
580 length
+= buffer_ptr
- buffer
;
582 return state
.finalize(length
);
586 } // namespace detail
587 } // namespace hashing
589 /// Combine values into a single hash_code.
591 /// This routine accepts a varying number of arguments of any type. It will
592 /// attempt to combine them into a single hash_code. For user-defined types it
593 /// attempts to call a \see hash_value overload (via ADL) for the type. For
594 /// integer and pointer types it directly combines their data into the
595 /// resulting hash_code.
597 /// The result is suitable for returning from a user's hash_value
598 /// *implementation* for their user-defined type. Consumers of a type should
599 /// *not* call this routine, they should instead call 'hash_value'.
600 template <typename
...Ts
> hash_code
hash_combine(const Ts
&...args
) {
601 // Recursively hash each argument using a helper class.
602 ::llvm::hashing::detail::hash_combine_recursive_helper helper
;
603 return helper
.combine(0, helper
.buffer
, helper
.buffer
+ 64, args
...);
606 // Implementation details for implementations of hash_value overloads provided
611 /// Helper to hash the value of a single integer.
613 /// Overloads for smaller integer types are not provided to ensure consistent
614 /// behavior in the presence of integral promotions. Essentially,
615 /// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
616 inline hash_code
hash_integer_value(uint64_t value
) {
617 // Similar to hash_4to8_bytes but using a seed instead of length.
618 const uint64_t seed
= get_execution_seed();
619 const char *s
= reinterpret_cast<const char *>(&value
);
620 const uint64_t a
= fetch32(s
);
621 return hash_16_bytes(seed
+ (a
<< 3), fetch32(s
+ 4));
624 } // namespace detail
625 } // namespace hashing
627 // Declared and documented above, but defined here so that any of the hashing
628 // infrastructure is available.
629 template <typename T
>
630 typename
std::enable_if
<is_integral_or_enum
<T
>::value
, hash_code
>::type
631 hash_value(T value
) {
632 return ::llvm::hashing::detail::hash_integer_value(
633 static_cast<uint64_t>(value
));
636 // Declared and documented above, but defined here so that any of the hashing
637 // infrastructure is available.
638 template <typename T
> hash_code
hash_value(const T
*ptr
) {
639 return ::llvm::hashing::detail::hash_integer_value(
640 reinterpret_cast<uintptr_t>(ptr
));
643 // Declared and documented above, but defined here so that any of the hashing
644 // infrastructure is available.
645 template <typename T
, typename U
>
646 hash_code
hash_value(const std::pair
<T
, U
> &arg
) {
647 return hash_combine(arg
.first
, arg
.second
);
650 // Declared and documented above, but defined here so that any of the hashing
651 // infrastructure is available.
652 template <typename T
>
653 hash_code
hash_value(const std::basic_string
<T
> &arg
) {
654 return hash_combine_range(arg
.begin(), arg
.end());