//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the newly proposed standard C++ interfaces for hashing
// arbitrary data and building hash functions for user-defined types. This
// interface was originally proposed in N3333[1] and is currently under review
// for inclusion in a future TR and/or standard.
//
// The primary interfaces provided comprise one type and three functions:
//
//  -- 'hash_code' class is an opaque type representing the hash code for some
//     data. It is the intended product of hashing, and can be used to implement
//     hash tables, checksumming, and other common uses of hashes. It is not an
//     integer type (although it can be converted to one) because it is risky
//     to assume much about the internals of a hash_code. In particular, each
//     execution of the program has a high probability of producing a different
//     hash_code for a given input. Thus hash_code values are not stable across
//     runs and should never be saved or persisted; use them only within a
//     single execution to build hashing data structures.
//
//  -- 'hash_value' is a function designed to be overloaded for each
//     user-defined type which wishes to be used within a hashing context. It
//     should be overloaded within the user-defined type's namespace and found
//     via ADL. Overloads for primitive types are provided by this library.
//
//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
//     programmers in easily and intuitively combining a set of data into
//     a single hash_code for their object. They should logically only be used
//     within the implementation of a 'hash_value' routine or similar context.
//
// Note that 'hash_combine_range' contains very special logic for hashing
// a contiguous array of integers or pointers. This logic is *extremely* fast:
// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
// benchmarked at over 6.5 GiB/s for large keys and under 20 cycles/hash for
// keys under 32 bytes.
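//
// As a quick sketch of how the pieces fit together (the 'Point' type below is
// hypothetical, not part of this library): a user-defined type overloads
// 'hash_value' and implements it with 'hash_combine', and clients then obtain
// a hash_code through 'hash_value':
//
//   struct Point {
//     int x, y;
//     friend llvm::hash_code hash_value(const Point &p) {
//       return llvm::hash_combine(p.x, p.y);
//     }
//   };
//
//   Point p = {1, 2};
//   llvm::hash_code code = hash_value(p);
//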
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <iterator>
#include <string>
#include <utility>

namespace llvm {
/// An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
/// While it wraps and exposes a numeric value, this value should not be
/// trusted to be stable or predictable across processes or executions.
///
/// In order to obtain the hash_code for an object 'x':
/// \code
///   using llvm::hash_value;
///   llvm::hash_code code = hash_value(x);
/// \endcode
class hash_code {
  size_t value;

public:
  /// Default construct a hash_code.
  /// Note that this leaves the value uninitialized.
  hash_code() = default;
  /// Form a hash code directly from a numerical value.
  hash_code(size_t value) : value(value) {}

  /// Convert the hash code to its numerical value for use.
  /*explicit*/ operator size_t() const { return value; }
  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value == rhs.value;
  }
  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value != rhs.value;
  }
  /// Allow a hash_code to be directly run through hash_value.
  friend size_t hash_value(const hash_code &code) { return code.value; }
};
/// Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
/// contrast to hash_combine which may produce different hash_codes for
/// differing argument types even if they would implicitly promote to a common
/// type without changing the value.
template <typename T>
typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
hash_value(T value);
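// For illustration (a sketch, not part of the library): because the
// definition below simply casts the argument to uint64_t, equal values hash
// equally regardless of their pre-promotion type:
//   assert(hash_value(static_cast<short>(42)) == hash_value(42L));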
/// Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*. Not the value and not the type.
template <typename T> hash_code hash_value(const T *ptr);
/// Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);
/// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);
/// Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
/// attackable and to ensure that output which is intended to be stable does
/// not rely on the particulars of the hash codes produced.
///
/// That said, there are use cases where it is important to be able to
/// reproduce *exactly* a specific behavior. To that end, we provide a function
/// which will forcibly set the seed to a fixed value. This must be done at the
/// start of the program, before any hashes are computed. Also, it cannot be
/// undone. This makes it thread-hostile and very hard to use outside of
/// immediately on start of a simple program designed for reproducible
/// behavior.
void set_fixed_execution_hash_seed(uint64_t fixed_value);
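// For example (a sketch; shown inside 'main' only to emphasize that it must
// run before any hashes are computed), a tool needing reproducible hashes
// would call this once at startup:
//   int main(int argc, char **argv) {
//     llvm::set_fixed_execution_hash_seed(0x1234);
//     // ... all hashing from this point on uses the fixed seed ...
//   }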
// All of the implementation details of actually computing the various hash
// code values are held within this namespace. These routines are included in
// the header file mainly to allow inlining and constant propagation.
namespace hashing {
namespace detail {
inline uint64_t fetch64(const char *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}
inline uint32_t fetch32(const char *p) {
  uint32_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}
/// Some primes between 2^63 and 2^64 for various uses.
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint64_t k3 = 0xc949d7c7509e6557ULL;
/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}
inline uint64_t shift_mix(uint64_t val) {
  return val ^ (val >> 47);
}
inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (low ^ high) * kMul;
  a ^= (a >> 47);
  uint64_t b = (high ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}
inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
  uint8_t a = s[0];
  uint8_t b = s[len >> 1];
  uint8_t c = s[len - 1];
  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
  uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}
inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch32(s);
  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
}
inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s);
  uint64_t b = fetch64(s + len - 8);
  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
}
inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s) * k1;
  uint64_t b = fetch64(s + 8);
  uint64_t c = fetch64(s + len - 8) * k2;
  uint64_t d = fetch64(s + len - 16) * k0;
  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
                       a + rotate(b ^ k3, 20) - c + len + seed);
}
inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t z = fetch64(s + 24);
  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
  uint64_t b = rotate(a + z, 52);
  uint64_t c = rotate(a, 37);
  a += fetch64(s + 8);
  c += rotate(a, 7);
  a += fetch64(s + 16);
  uint64_t vf = a + z;
  uint64_t vs = b + rotate(a, 31) + c;
  a = fetch64(s + 16) + fetch64(s + len - 32);
  z = fetch64(s + len - 8);
  b = rotate(a + z, 52);
  c = rotate(a, 37);
  a += fetch64(s + len - 24);
  c += rotate(a, 7);
  a += fetch64(s + len - 16);
  uint64_t wf = a + z;
  uint64_t ws = b + rotate(a, 31) + c;
  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
  return shift_mix((seed ^ (r * k0)) + vs) * k2;
}
inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
  if (length >= 4 && length <= 8)
    return hash_4to8_bytes(s, length, seed);
  if (length > 8 && length <= 16)
    return hash_9to16_bytes(s, length, seed);
  if (length > 16 && length <= 32)
    return hash_17to32_bytes(s, length, seed);
  if (length > 32)
    return hash_33to64_bytes(s, length, seed);
  if (length != 0)
    return hash_1to3_bytes(s, length, seed);

  return k2 ^ seed;
}
/// The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
  uint64_t h0, h1, h2, h3, h4, h5, h6;
  /// Create a new hash_state structure and initialize it based on the
  /// seed and the first 64-byte chunk.
  /// This effectively performs the initial mix.
  static hash_state create(const char *s, uint64_t seed) {
    hash_state state = {
      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
      seed * k1, shift_mix(seed), 0 };
    state.h6 = hash_16_bytes(state.h4, state.h5);
    state.mix(s);
    return state;
  }
  /// Mix 32-bytes from the input sequence into the 16-bytes of 'a'
  /// and 'b', including whatever is already in 'a' and 'b'.
  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
    a += fetch64(s);
    uint64_t c = fetch64(s + 24);
    b = rotate(b + a + c, 21);
    uint64_t d = a;
    a += fetch64(s + 8) + fetch64(s + 16);
    b += rotate(a, 44) + d;
    a += c;
    b += a;
  }
  /// Mix in a 64-byte buffer of data.
  /// We mix all 64 bytes even when the chunk length is smaller, but we
  /// record the actual length.
  void mix(const char *s) {
    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
    h0 ^= h6;
    h1 += h3 + fetch64(s + 40);
    h2 = rotate(h2 + h5, 33) * k1;
    h3 = h4 * k1;
    h4 = h0 + h5;
    mix_32_bytes(s, h3, h4);
    h5 = h2 + h6;
    h6 = h1 + fetch64(s + 16);
    mix_32_bytes(s + 32, h5, h6);
    std::swap(h2, h0);
  }
  /// Compute the final 64-bit hash code value based on the current
  /// state and the length of bytes hashed.
  uint64_t finalize(size_t length) {
    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
  }
};
/// A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable.
extern uint64_t fixed_seed_override;
inline uint64_t get_execution_seed() {
  // FIXME: This needs to be a per-execution seed. This is just a placeholder
  // implementation. Switching to a per-execution seed is likely to flush out
  // instability bugs and so will happen as its own commit.
  //
  // However, if there is a fixed seed override set the first time this is
  // called, return that instead of the per-execution seed.
  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
  static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime;
  return seed;
}
/// Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
/// first be passed to hash_value, and the resulting hash_codes combined.
//
// FIXME: We want to replace is_integral_or_enum and is_pointer here with
// a predicate which asserts that comparing the underlying storage of two
// values of the type for equality is equivalent to comparing the two values
// for equality. For all the platforms we care about, this holds for integers
// and pointers, but there are platforms where it doesn't and we would like to
// support user-defined types which happen to satisfy this property.
template <typename T> struct is_hashable_data
  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
                                   std::is_pointer<T>::value) &&
                                  64 % sizeof(T) == 0)> {};
// Special case std::pair to detect when both types are viable and when there
// is no alignment-derived padding in the pair. This is a bit of a lie because
// std::pair isn't truly POD, but it's close enough in all reasonable
// implementations for our use case of hashing the underlying data.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
  : std::integral_constant<bool, (is_hashable_data<T>::value &&
                                  is_hashable_data<U>::value &&
                                  (sizeof(T) + sizeof(U)) ==
                                   sizeof(std::pair<T, U>))> {};
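// A quick illustration (a sketch, not part of the library) of how the trait
// behaves, assuming a typical LP64 target:
//   static_assert(is_hashable_data<int>::value, "raw bits can be hashed");
//   static_assert(is_hashable_data<std::pair<int, int> >::value,
//                 "no padding and both members hashable");
//   static_assert(!is_hashable_data<std::pair<int, long> >::value,
//                 "alignment padding rules this pair out");
//   static_assert(!is_hashable_data<std::string>::value,
//                 "must be reduced through hash_value first");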
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
typename std::enable_if<is_hashable_data<T>::value, T>::type
get_hashable_data(const T &value) {
  return value;
}
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
get_hashable_data(const T &value) {
  using ::llvm::hash_value;
  return hash_value(value);
}
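// For illustration (hypothetical calls, not part of the library): an 'int' is
// hashable data and passes through the first overload unchanged, while a
// 'std::string' is not and is first reduced to a size_t by the second:
//   int i = get_hashable_data(42);                    // returns 42 itself
//   size_t s = get_hashable_data(std::string("key")); // returns hash_value(...)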
/// Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
/// buffer, and if not immediately returns false. If there is space, it
/// copies the underlying bytes of value into the buffer, advances the
/// buffer_ptr past the copied bytes, and returns true.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;

  const char *value_data = reinterpret_cast<const char *>(&value);
  memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}
/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is
/// integral. Rather than computing a hash_code for each object and then
/// combining them, this (as an optimization) directly combines the integers.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
  const uint64_t seed = get_execution_seed();
  char buffer[64], *buffer_ptr = buffer;
  char *const buffer_end = std::end(buffer);
  while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                            get_hashable_data(*first)))
    ++first;
  if (first == last)
    return hash_short(buffer, buffer_ptr - buffer, seed);
  assert(buffer_ptr == buffer_end);

  hash_state state = state.create(buffer, seed);
  size_t length = 64;
  while (first != last) {
    // Fill up the buffer. We don't clear it, which re-mixes the last round
    // when only a partial 64-byte chunk is left.
    buffer_ptr = buffer;
    while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                              get_hashable_data(*first)))
      ++first;

    // Rotate the buffer if we did a partial fill in order to simulate doing
    // a mix of the last 64-bytes. That is how the algorithm works when we
    // have a contiguous byte sequence, and we want to emulate that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;
  }

  return state.finalize(length);
}
/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is integral
/// and when the input iterator is actually a pointer. Rather than computing
/// a hash_code for each object and then combining them, this (as an
/// optimization) directly combines the integers. Also, because the integers
/// are stored in contiguous memory, this routine avoids copying each value
/// and directly reads from the underlying memory.
template <typename ValueT>
typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
hash_combine_range_impl(ValueT *first, ValueT *last) {
  const uint64_t seed = get_execution_seed();
  const char *s_begin = reinterpret_cast<const char *>(first);
  const char *s_end = reinterpret_cast<const char *>(last);
  const size_t length = std::distance(s_begin, s_end);
  if (length <= 64)
    return hash_short(s_begin, length, seed);

  const char *s_aligned_end = s_begin + (length & ~63);
  hash_state state = state.create(s_begin, seed);
  s_begin += 64;
  while (s_begin != s_aligned_end) {
    state.mix(s_begin);
    s_begin += 64;
  }
  if (length & 63)
    state.mix(s_end - 64);

  return state.finalize(length);
}
} // namespace detail
} // namespace hashing
/// Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
/// and is significantly faster given pointers and types which can be hashed as
/// a sequence of bytes.
template <typename InputIteratorT>
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
}
486 /// Helper class to manage the recursive combining of hash_combine
489 /// This class exists to manage the state and various calls involved in the
490 /// recursive combining of arguments used in hash_combine. It is particularly
491 /// useful at minimizing the code in the recursive calls to ease the pain
492 /// caused by a lack of variadic functions.
493 struct hash_combine_recursive_helper
{
499 /// Construct a recursive hash combining helper.
501 /// This sets up the state for a recursive hash combine, including getting
502 /// the seed and buffer setup.
503 hash_combine_recursive_helper()
504 : seed(get_execution_seed()) {}
  /// Combine one chunk of data into the current in-flight hash.
  ///
  /// This merges one chunk of data into the hash. First it tries to buffer
  /// the data. If the buffer is full, it hashes the buffer into its
  /// hash_state, empties it, and then merges the new chunk in. This also
  /// handles cases where the data straddles the end of the buffer.
  template <typename T>
  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
      // Check for skew which prevents the buffer from being packed, and do
      // a partial store into the buffer to fill it. This is only a concern
      // with the variadic combine because that formation can have varying
      // argument types.
      size_t partial_store_size = buffer_end - buffer_ptr;
      memcpy(buffer_ptr, &data, partial_store_size);

      // If the store fails, our buffer is full and ready to hash. We have to
      // either initialize the hash state (on the first full buffer) or mix
      // this buffer into the existing hash state. Length tracks the *hashed*
      // length, not the buffered length.
      if (length == 0) {
        state = state.create(buffer, seed);
        length = 64;
      } else {
        // Mix this chunk into the current state and bump length up by 64.
        state.mix(buffer);
        length += 64;
      }

      // Reset the buffer_ptr to the head of the buffer for the next chunk of
      // data.
      buffer_ptr = buffer;

      // Try again to store into the buffer -- this cannot fail as we only
      // store types smaller than the buffer.
      if (!store_and_advance(buffer_ptr, buffer_end, data,
                             partial_store_size))
        llvm_unreachable("buffer smaller than stored type");
    }
    return buffer_ptr;
  }
  /// Recursive, variadic combining method.
  ///
  /// This function recurses through each argument, combining that argument
  /// into a single hash.
  template <typename T, typename ...Ts>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T &arg, const Ts &...args) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));

    // Recurse to the next argument.
    return combine(length, buffer_ptr, buffer_end, args...);
  }
  /// Base case for recursive, variadic combining.
  ///
  /// The base case when combining arguments recursively is reached when all
  /// arguments have been handled. It flushes the remaining buffer and
  /// constructs a hash_code.
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
    // Check whether the entire set of values fit in the buffer. If so, we'll
    // use the optimized short hashing routine and skip state entirely.
    if (length == 0)
      return hash_short(buffer, buffer_ptr - buffer, seed);

    // Mix the final buffer, rotating it if we did a partial fill in order to
    // simulate doing a mix of the last 64-bytes. That is how the algorithm
    // works when we have a contiguous byte sequence, and we want to emulate
    // that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;

    return state.finalize(length);
  }
};
} // namespace detail
} // namespace hashing
/// Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
/// attempts to call a \see hash_value overload (via ADL) for the type. For
/// integer and pointer types it directly combines their data into the
/// resulting hash_code.
///
/// The result is suitable for returning from a user's hash_value
/// *implementation* for their user-defined type. Consumers of a type should
/// *not* call this routine; they should instead call 'hash_value'.
template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
  // Recursively hash each argument using a helper class.
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
}
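// For example (a sketch with hypothetical values): arguments of mixed types
// can be combined directly; the integer and pointer are combined bit-wise
// while the string is first reduced through its hash_value overload:
//   int i = 42;
//   llvm::hash_code code = llvm::hash_combine(i, std::string("key"), &i);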
// Implementation details for implementations of hash_value overloads provided
// here.
namespace hashing {
namespace detail {

/// Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,
/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
inline hash_code hash_integer_value(uint64_t value) {
  // Similar to hash_4to8_bytes but using a seed instead of length.
  const uint64_t seed = get_execution_seed();
  const char *s = reinterpret_cast<const char *>(&value);
  const uint64_t a = fetch32(s);
  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
}
} // namespace detail
} // namespace hashing
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
hash_value(T value) {
  return ::llvm::hashing::detail::hash_integer_value(
      static_cast<uint64_t>(value));
}
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T> hash_code hash_value(const T *ptr) {
  return ::llvm::hashing::detail::hash_integer_value(
      reinterpret_cast<uintptr_t>(ptr));
}
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg) {
  return hash_combine(arg.first, arg.second);
}
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg) {
  return hash_combine_range(arg.begin(), arg.end());
}

} // namespace llvm

#endif