1 ///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
3 // A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
5 // https://github.com/martinus/unordered_dense
7 // Licensed under the MIT License <http://opensource.org/licenses/MIT>.
8 // SPDX-License-Identifier: MIT
9 // Copyright (c) 2022-2023 Martin Leitner-Ankerl <martin.ankerl@gmail.com>
11 // Permission is hereby granted, free of charge, to any person obtaining a copy
12 // of this software and associated documentation files (the "Software"), to deal
13 // in the Software without restriction, including without limitation the rights
14 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 // copies of the Software, and to permit persons to whom the Software is
16 // furnished to do so, subject to the following conditions:
18 // The above copyright notice and this permission notice shall be included in all
19 // copies or substantial portions of the Software.
21 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #ifndef ANKERL_UNORDERED_DENSE_H
30 #define ANKERL_UNORDERED_DENSE_H
32 // see https://semver.org/spec/v2.0.0.html
33 #define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 4 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes
34 #define ANKERL_UNORDERED_DENSE_VERSION_MINOR 4 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
35 #define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes
37 // API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/
39 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
40 #define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch
41 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
42 #define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch)
43 #define ANKERL_UNORDERED_DENSE_NAMESPACE \
44 ANKERL_UNORDERED_DENSE_VERSION_CONCAT( \
45 ANKERL_UNORDERED_DENSE_VERSION_MAJOR, ANKERL_UNORDERED_DENSE_VERSION_MINOR, ANKERL_UNORDERED_DENSE_VERSION_PATCH)
47 #if defined(_MSVC_LANG)
48 # define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG
50 # define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus
54 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
55 # define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__))
56 #elif defined(_MSC_VER)
57 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
58 # define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop))
62 #if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)
63 # define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 1 // NOLINT(cppcoreguidelines-macro-usage)
65 # define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 0 // NOLINT(cppcoreguidelines-macro-usage)
68 # define ANKERL_UNORDERED_DENSE_NOINLINE __declspec(noinline)
70 # define ANKERL_UNORDERED_DENSE_NOINLINE __attribute__((noinline))
73 // defined in unordered_dense.cpp
74 #if !defined(ANKERL_UNORDERED_DENSE_EXPORT)
75 # define ANKERL_UNORDERED_DENSE_EXPORT
78 #if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
79 # error ankerl::unordered_dense requires C++17 or higher
81 # include <array> // for array
82 # include <cstdint> // for uint64_t, uint32_t, uint8_t, UINT64_C
83 # include <cstring> // for size_t, memcpy, memset
84 # include <functional> // for equal_to, hash
85 # include <initializer_list> // for initializer_list
86 # include <iterator> // for pair, distance
87 # include <limits> // for numeric_limits
88 # include <memory> // for allocator, allocator_traits, shared_ptr
89 # include <optional> // for optional
90 # include <stdexcept> // for out_of_range
91 # include <string> // for basic_string
92 # include <string_view> // for basic_string_view, hash
93 # include <tuple> // for forward_as_tuple
94 # include <type_traits> // for enable_if_t, declval, conditional_t, ena...
95 # include <utility> // for forward, exchange, pair, as_const, piece...
96 # include <vector> // for vector
97 # if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0
98 # include <cstdlib> // for abort
101 # if defined(__has_include)
102 # if __has_include(<memory_resource>)
103 # define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage)
104 # include <memory_resource> // for polymorphic_allocator
105 # elif __has_include(<experimental/memory_resource>)
106 # define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage)
107 # include <experimental/memory_resource> // for polymorphic_allocator
111 # if defined(_MSC_VER) && defined(_M_X64)
113 # pragma intrinsic(_umul128)
116 # if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
117 # define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) // NOLINT(cppcoreguidelines-macro-usage)
118 # define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage)
120 # define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
121 # define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
124 namespace ankerl::unordered_dense
{
125 inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE
{
129 # if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS()
131 // make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
132 // inlinings more difficult. Throws are also generally the slow path.
133 [[noreturn
]] inline ANKERL_UNORDERED_DENSE_NOINLINE
void on_error_key_not_found() {
134 throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found");
136 [[noreturn
]] inline ANKERL_UNORDERED_DENSE_NOINLINE
void on_error_bucket_overflow() {
137 throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size");
139 [[noreturn
]] inline ANKERL_UNORDERED_DENSE_NOINLINE
void on_error_too_many_elements() {
140 throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements");
145 [[noreturn
]] inline void on_error_key_not_found() {
148 [[noreturn
]] inline void on_error_bucket_overflow() {
151 [[noreturn
]] inline void on_error_too_many_elements() {
157 } // namespace detail
159 // hash ///////////////////////////////////////////////////////////////////////
161 // This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash
162 // No big-endian support (because different values on different machines don't matter),
163 // hardcodes seed and the secret, reformats the code, and clang-tidy fixes.
164 namespace detail::wyhash
{
166 inline void mum(uint64_t* a
, uint64_t* b
) {
167 # if defined(__SIZEOF_INT128__)
170 *a
= static_cast<uint64_t>(r
);
171 *b
= static_cast<uint64_t>(r
>> 64U);
172 # elif defined(_MSC_VER) && defined(_M_X64)
173 *a
= _umul128(*a
, *b
, b
);
175 uint64_t ha
= *a
>> 32U;
176 uint64_t hb
= *b
>> 32U;
177 uint64_t la
= static_cast<uint32_t>(*a
);
178 uint64_t lb
= static_cast<uint32_t>(*b
);
181 uint64_t rh
= ha
* hb
;
182 uint64_t rm0
= ha
* lb
;
183 uint64_t rm1
= hb
* la
;
184 uint64_t rl
= la
* lb
;
185 uint64_t t
= rl
+ (rm0
<< 32U);
186 auto c
= static_cast<uint64_t>(t
< rl
);
187 lo
= t
+ (rm1
<< 32U);
188 c
+= static_cast<uint64_t>(lo
< t
);
189 hi
= rh
+ (rm0
>> 32U) + (rm1
>> 32U) + c
;
195 // multiply and xor mix function, aka MUM
196 [[nodiscard
]] inline auto mix(uint64_t a
, uint64_t b
) -> uint64_t {
201 // read functions. WARNING: we don't care about endianness, so results are different on big endian!
202 [[nodiscard
]] inline auto r8(const uint8_t* p
) -> uint64_t {
204 std::memcpy(&v
, p
, 8U);
208 [[nodiscard
]] inline auto r4(const uint8_t* p
) -> uint64_t {
210 std::memcpy(&v
, p
, 4);
// Reads 1, 2, or 3 bytes from p and packs them into one value. The three
// probed positions (first, middle, last byte) overlap when k < 3, which is
// fine: the result only has to depend on every input byte, not be unique.
[[nodiscard]] inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
    auto const first = static_cast<uint64_t>(p[0]);
    auto const middle = static_cast<uint64_t>(p[k >> 1U]);
    auto const last = static_cast<uint64_t>(p[k - 1]);
    return (first << 16U) | (middle << 8U) | last;
}
219 [[maybe_unused
]] [[nodiscard
]] inline auto hash(void const* key
, size_t len
) -> uint64_t {
220 static constexpr auto secret
= std::array
{UINT64_C(0xa0761d6478bd642f),
221 UINT64_C(0xe7037ed1a0b428db),
222 UINT64_C(0x8ebc6af09c88c6e3),
223 UINT64_C(0x589965cc75374cc3)};
225 auto const* p
= static_cast<uint8_t const*>(key
);
226 uint64_t seed
= secret
[0];
229 if (ANKERL_UNORDERED_DENSE_LIKELY(len
<= 16)) {
230 if (ANKERL_UNORDERED_DENSE_LIKELY(len
>= 4)) {
231 a
= (r4(p
) << 32U) | r4(p
+ ((len
>> 3U) << 2U));
232 b
= (r4(p
+ len
- 4) << 32U) | r4(p
+ len
- 4 - ((len
>> 3U) << 2U));
233 } else if (ANKERL_UNORDERED_DENSE_LIKELY(len
> 0)) {
242 if (ANKERL_UNORDERED_DENSE_UNLIKELY(i
> 48)) {
243 uint64_t see1
= seed
;
244 uint64_t see2
= seed
;
246 seed
= mix(r8(p
) ^ secret
[1], r8(p
+ 8) ^ seed
);
247 see1
= mix(r8(p
+ 16) ^ secret
[2], r8(p
+ 24) ^ see1
);
248 see2
= mix(r8(p
+ 32) ^ secret
[3], r8(p
+ 40) ^ see2
);
251 } while (ANKERL_UNORDERED_DENSE_LIKELY(i
> 48));
254 while (ANKERL_UNORDERED_DENSE_UNLIKELY(i
> 16)) {
255 seed
= mix(r8(p
) ^ secret
[1], r8(p
+ 8) ^ seed
);
263 return mix(secret
[1] ^ len
, mix(a
^ secret
[1], b
^ seed
));
266 [[nodiscard
]] inline auto hash(uint64_t x
) -> uint64_t {
267 return detail::wyhash::mix(x
, UINT64_C(0x9E3779B97F4A7C15));
270 } // namespace detail::wyhash
272 ANKERL_UNORDERED_DENSE_EXPORT
template <typename T
, typename Enable
= void>
274 auto operator()(T
const& obj
) const noexcept(noexcept(std::declval
<std::hash
<T
>>().operator()(std::declval
<T
const&>())))
276 return std::hash
<T
>{}(obj
);
280 template <typename CharT
>
281 struct hash
<std::basic_string
<CharT
>> {
282 using is_avalanching
= void;
283 auto operator()(std::basic_string
<CharT
> const& str
) const noexcept
-> uint64_t {
284 return detail::wyhash::hash(str
.data(), sizeof(CharT
) * str
.size());
288 template <typename CharT
>
289 struct hash
<std::basic_string_view
<CharT
>> {
290 using is_avalanching
= void;
291 auto operator()(std::basic_string_view
<CharT
> const& sv
) const noexcept
-> uint64_t {
292 return detail::wyhash::hash(sv
.data(), sizeof(CharT
) * sv
.size());
298 using is_avalanching
= void;
299 auto operator()(T
* ptr
) const noexcept
-> uint64_t {
300 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
301 return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr
));
306 struct hash
<std::unique_ptr
<T
>> {
307 using is_avalanching
= void;
308 auto operator()(std::unique_ptr
<T
> const& ptr
) const noexcept
-> uint64_t {
309 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
310 return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr
.get()));
315 struct hash
<std::shared_ptr
<T
>> {
316 using is_avalanching
= void;
317 auto operator()(std::shared_ptr
<T
> const& ptr
) const noexcept
-> uint64_t {
318 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
319 return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr
.get()));
323 template <typename Enum
>
324 struct hash
<Enum
, typename
std::enable_if
<std::is_enum
<Enum
>::value
>::type
> {
325 using is_avalanching
= void;
326 auto operator()(Enum e
) const noexcept
-> uint64_t {
327 using underlying
= typename
std::underlying_type_t
<Enum
>;
328 return detail::wyhash::hash(static_cast<underlying
>(e
));
332 template <typename
... Args
>
333 struct tuple_hash_helper
{
334 // Converts the value into 64bit. If it is an integral type, just cast it. Mixing is doing the rest.
335 // If it isn't an integral we need to hash it.
336 template <typename Arg
>
337 [[nodiscard
]] constexpr static auto to64(Arg
const& arg
) -> uint64_t {
338 if constexpr (std::is_integral_v
<Arg
> || std::is_enum_v
<Arg
>) {
339 return static_cast<uint64_t>(arg
);
341 return hash
<Arg
>{}(arg
);
345 [[nodiscard
]] static auto mix64(uint64_t state
, uint64_t v
) -> uint64_t {
346 return detail::wyhash::mix(state
+ v
, uint64_t{0x9ddfea08eb382d69});
349 // Creates a buffer that holds all the data from each element of the tuple. If possible we memcpy the data directly. If
350 // not, we hash the object and use this for the array. Size of the array is known at compile time, and memcpy is optimized
351 // away, so filling the buffer is highly efficient. Finally, call wyhash with this buffer.
352 template <typename T
, std::size_t... Idx
>
353 [[nodiscard
]] static auto calc_hash(T
const& t
, std::index_sequence
<Idx
...>) noexcept
-> uint64_t {
355 ((h
= mix64(h
, to64(std::get
<Idx
>(t
)))), ...);
360 template <typename
... Args
>
361 struct hash
<std::tuple
<Args
...>> : tuple_hash_helper
<Args
...> {
362 using is_avalanching
= void;
363 auto operator()(std::tuple
<Args
...> const& t
) const noexcept
-> uint64_t {
364 return tuple_hash_helper
<Args
...>::calc_hash(t
, std::index_sequence_for
<Args
...>{});
368 template <typename A
, typename B
>
369 struct hash
<std::pair
<A
, B
>> : tuple_hash_helper
<A
, B
> {
370 using is_avalanching
= void;
371 auto operator()(std::pair
<A
, B
> const& t
) const noexcept
-> uint64_t {
372 return tuple_hash_helper
<A
, B
>::calc_hash(t
, std::index_sequence_for
<A
, B
>{});
376 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
377 # define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \
380 using is_avalanching = void; \
381 auto operator()(T const& obj) const noexcept -> uint64_t { \
382 return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
386 # if defined(__GNUC__) && !defined(__clang__)
387 # pragma GCC diagnostic push
388 # pragma GCC diagnostic ignored "-Wuseless-cast"
390 // see https://en.cppreference.com/w/cpp/utility/hash
391 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool);
392 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char);
393 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char);
394 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char);
395 # if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L && defined(__cpp_char8_t)
396 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t
);
398 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t
);
399 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t
);
400 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t);
401 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short);
402 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short);
403 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int);
404 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int);
405 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long);
406 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long);
407 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long);
408 ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);
410 # if defined(__GNUC__) && !defined(__clang__)
411 # pragma GCC diagnostic pop
414 // bucket_type //////////////////////////////////////////////////////////
416 namespace bucket_type
{
419 static constexpr uint32_t dist_inc
= 1U << 8U; // skip 1 byte fingerprint
420 static constexpr uint32_t fingerprint_mask
= dist_inc
- 1; // mask for 1 byte of fingerprint
422 uint32_t m_dist_and_fingerprint
; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
423 uint32_t m_value_idx
; // index into the m_values vector.
426 ANKERL_UNORDERED_DENSE_PACK(struct big
{
427 static constexpr uint32_t dist_inc
= 1U << 8U; // skip 1 byte fingerprint
428 static constexpr uint32_t fingerprint_mask
= dist_inc
- 1; // mask for 1 byte of fingerprint
430 uint32_t m_dist_and_fingerprint
; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
431 size_t m_value_idx
; // index into the m_values vector.
434 } // namespace bucket_type
440 template <class Default
, class AlwaysVoid
, template <class...> class Op
, class... Args
>
442 using value_t
= std::false_type
;
443 using type
= Default
;
446 template <class Default
, template <class...> class Op
, class... Args
>
447 struct detector
<Default
, std::void_t
<Op
<Args
...>>, Op
, Args
...> {
448 using value_t
= std::true_type
;
449 using type
= Op
<Args
...>;
// Result of the detection idiom: std::true_type when Op<Args...> is a
// well-formed type, std::false_type otherwise (via detail::detector).
template <template <class...> class Op, class... Args>
using is_detected = typename detail::detector<detail::nonesuch, void, Op, Args...>::value_t;

// Convenience variable template: true when Op<Args...> is well-formed.
template <template <class...> class Op, class... Args>
constexpr bool is_detected_v = is_detected<Op, Args...>::value;
// Detection probes for is_detected_v: each one names an optional member
// that a type may provide to opt in to a feature.

// T advertises a high-quality (avalanching) hash via a member typedef.
template <typename T>
using detect_avalanching = typename T::is_avalanching;

// T opts in to transparent (heterogeneous) lookup via a member typedef.
template <typename T>
using detect_is_transparent = typename T::is_transparent;

// T provides an iterator member type.
template <typename T>
using detect_iterator = typename T::iterator;

// T has a reserve(size_t) member function.
template <typename T>
using detect_reserve = decltype(std::declval<T&>().reserve(size_t{}));
// True when the table is used as a map (Mapped is non-void); a set passes void.
template <typename Mapped>
constexpr bool is_map_v = !std::is_void_v<Mapped>;

// True when both the hash and the key-equal opt in to heterogeneous lookup.
template <typename Hash, typename KeyEqual>
constexpr bool is_transparent_v = is_detected_v<detect_is_transparent, Hash> && is_detected_v<detect_is_transparent, KeyEqual>;

// True when From converts to neither To1 nor To2.
template <typename From, typename To1, typename To2>
constexpr bool is_neither_convertible_v = !std::is_convertible_v<From, To1> && !std::is_convertible_v<From, To2>;

// True when T offers a reserve(size_t) member.
template <typename T>
constexpr bool has_reserve = is_detected_v<detect_reserve, T>;
486 // base type for map has mapped_type
488 struct base_table_type_map
{
489 using mapped_type
= T
;
492 // base type for set doesn't have mapped_type
493 struct base_table_type_set
{};
495 } // namespace detail
497 // Very much like std::deque, but faster for indexing (in most cases). As of now this doesn't implement the full std::vector
498 // API, but merely what's necessary to work as an underlying container for ankerl::unordered_dense::{map, set}.
499 // It allocates blocks of equal size and puts them into the m_blocks vector. That means it can grow simply by adding a new
500 // block to the back of m_blocks, and doesn't double its size like an std::vector. The disadvantage is that memory is not
501 // linear and thus there is one more indirection necessary for indexing.
502 template <typename T
, typename Allocator
= std::allocator
<T
>, size_t MaxSegmentSizeBytes
= 4096>
503 class segmented_vector
{
504 template <bool IsConst
>
508 using allocator_type
= Allocator
;
509 using pointer
= typename
std::allocator_traits
<allocator_type
>::pointer
;
510 using const_pointer
= typename
std::allocator_traits
<allocator_type
>::const_pointer
;
511 using difference_type
= typename
std::allocator_traits
<allocator_type
>::difference_type
;
512 using value_type
= T
;
513 using size_type
= std::size_t;
514 using reference
= T
&;
515 using const_reference
= T
const&;
516 using iterator
= iter_t
<false>;
517 using const_iterator
= iter_t
<true>;
520 using vec_alloc
= typename
std::allocator_traits
<Allocator
>::template rebind_alloc
<pointer
>;
521 std::vector
<pointer
, vec_alloc
> m_blocks
{};
524 // Calculates the maximum number for x in (s << x) <= max_val
525 static constexpr auto num_bits_closest(size_t max_val
, size_t s
) -> size_t {
527 while (s
<< (f
+ 1) <= max_val
) {
533 using self_t
= segmented_vector
<T
, Allocator
, MaxSegmentSizeBytes
>;
534 static constexpr auto num_bits
= num_bits_closest(MaxSegmentSizeBytes
, sizeof(T
));
535 static constexpr auto num_elements_in_block
= 1U << num_bits
;
536 static constexpr auto mask
= num_elements_in_block
- 1U;
539 * Iterator class doubles as const_iterator and iterator
541 template <bool IsConst
>
543 using ptr_t
= typename
std::conditional_t
<IsConst
, segmented_vector::const_pointer
const*, segmented_vector::pointer
*>;
551 using difference_type
= segmented_vector::difference_type
;
552 using value_type
= T
;
553 using reference
= typename
std::conditional_t
<IsConst
, value_type
const&, value_type
&>;
554 using pointer
= typename
std::conditional_t
<IsConst
, segmented_vector::const_pointer
, segmented_vector::pointer
>;
555 using iterator_category
= std::forward_iterator_tag
;
557 iter_t() noexcept
= default;
559 template <bool OtherIsConst
, typename
= typename
std::enable_if
<IsConst
&& !OtherIsConst
>::type
>
560 // NOLINTNEXTLINE(google-explicit-constructor,hicpp-explicit-conversions)
561 constexpr iter_t(iter_t
<OtherIsConst
> const& other
) noexcept
562 : m_data(other
.m_data
)
563 , m_idx(other
.m_idx
) {}
565 constexpr iter_t(ptr_t data
, size_t idx
) noexcept
569 template <bool OtherIsConst
, typename
= typename
std::enable_if
<IsConst
&& !OtherIsConst
>::type
>
570 constexpr auto operator=(iter_t
<OtherIsConst
> const& other
) noexcept
-> iter_t
& {
571 m_data
= other
.m_data
;
576 constexpr auto operator++() noexcept
-> iter_t
& {
581 constexpr auto operator+(difference_type diff
) noexcept
-> iter_t
{
582 return {m_data
, static_cast<size_t>(static_cast<difference_type
>(m_idx
) + diff
)};
585 template <bool OtherIsConst
>
586 constexpr auto operator-(iter_t
<OtherIsConst
> const& other
) noexcept
-> difference_type
{
587 return static_cast<difference_type
>(m_idx
) - static_cast<difference_type
>(other
.m_idx
);
590 constexpr auto operator*() const noexcept
-> reference
{
591 return m_data
[m_idx
>> num_bits
][m_idx
& mask
];
594 constexpr auto operator->() const noexcept
-> pointer
{
595 return &m_data
[m_idx
>> num_bits
][m_idx
& mask
];
599 constexpr auto operator==(iter_t
<O
> const& o
) const noexcept
-> bool {
600 return m_idx
== o
.m_idx
;
604 constexpr auto operator!=(iter_t
<O
> const& o
) const noexcept
-> bool {
605 return !(*this == o
);
609 // slow path: need to allocate a new segment every once in a while
610 void increase_capacity() {
611 auto ba
= Allocator(m_blocks
.get_allocator());
612 pointer block
= std::allocator_traits
<Allocator
>::allocate(ba
, num_elements_in_block
);
613 m_blocks
.push_back(block
);
616 // Moves everything from other
617 void append_everything_from(segmented_vector
&& other
) {
618 reserve(size() + other
.size());
619 for (auto&& o
: other
) {
620 emplace_back(std::move(o
));
624 // Copies everything from other
625 void append_everything_from(segmented_vector
const& other
) {
626 reserve(size() + other
.size());
627 for (auto const& o
: other
) {
633 auto ba
= Allocator(m_blocks
.get_allocator());
634 for (auto ptr
: m_blocks
) {
635 std::allocator_traits
<Allocator
>::deallocate(ba
, ptr
, num_elements_in_block
);
639 [[nodiscard
]] static constexpr auto calc_num_blocks_for_capacity(size_t capacity
) {
640 return (capacity
+ num_elements_in_block
- 1U) / num_elements_in_block
;
644 segmented_vector() = default;
646 // NOLINTNEXTLINE(google-explicit-constructor,hicpp-explicit-conversions)
647 segmented_vector(Allocator alloc
)
648 : m_blocks(vec_alloc(alloc
)) {}
650 segmented_vector(segmented_vector
&& other
, Allocator alloc
)
651 : segmented_vector(alloc
) {
652 *this = std::move(other
);
655 segmented_vector(segmented_vector
const& other
, Allocator alloc
)
656 : m_blocks(vec_alloc(alloc
)) {
657 append_everything_from(other
);
660 segmented_vector(segmented_vector
&& other
) noexcept
661 : segmented_vector(std::move(other
), get_allocator()) {}
663 segmented_vector(segmented_vector
const& other
) {
664 append_everything_from(other
);
667 auto operator=(segmented_vector
const& other
) -> segmented_vector
& {
668 if (this == &other
) {
672 append_everything_from(other
);
676 auto operator=(segmented_vector
&& other
) noexcept
-> segmented_vector
& {
679 if (other
.get_allocator() == get_allocator()) {
680 m_blocks
= std::move(other
.m_blocks
);
681 m_size
= std::exchange(other
.m_size
, {});
683 // make sure to construct with other's allocator!
684 m_blocks
= std::vector
<pointer
, vec_alloc
>(vec_alloc(other
.get_allocator()));
685 append_everything_from(std::move(other
));
690 ~segmented_vector() {
695 [[nodiscard
]] constexpr auto size() const -> size_t {
699 [[nodiscard
]] constexpr auto capacity() const -> size_t {
700 return m_blocks
.size() * num_elements_in_block
;
703 // Indexing is highly performance critical
704 [[nodiscard
]] constexpr auto operator[](size_t i
) const noexcept
-> T
const& {
705 return m_blocks
[i
>> num_bits
][i
& mask
];
708 [[nodiscard
]] constexpr auto operator[](size_t i
) noexcept
-> T
& {
709 return m_blocks
[i
>> num_bits
][i
& mask
];
712 [[nodiscard
]] constexpr auto begin() -> iterator
{
713 return {m_blocks
.data(), 0U};
715 [[nodiscard
]] constexpr auto begin() const -> const_iterator
{
716 return {m_blocks
.data(), 0U};
718 [[nodiscard
]] constexpr auto cbegin() const -> const_iterator
{
719 return {m_blocks
.data(), 0U};
722 [[nodiscard
]] constexpr auto end() -> iterator
{
723 return {m_blocks
.data(), m_size
};
725 [[nodiscard
]] constexpr auto end() const -> const_iterator
{
726 return {m_blocks
.data(), m_size
};
728 [[nodiscard
]] constexpr auto cend() const -> const_iterator
{
729 return {m_blocks
.data(), m_size
};
732 [[nodiscard
]] constexpr auto back() -> reference
{
733 return operator[](m_size
- 1);
735 [[nodiscard
]] constexpr auto back() const -> const_reference
{
736 return operator[](m_size
- 1);
744 [[nodiscard
]] auto empty() const {
748 void reserve(size_t new_capacity
) {
749 m_blocks
.reserve(calc_num_blocks_for_capacity(new_capacity
));
750 while (new_capacity
> capacity()) {
755 [[nodiscard
]] auto get_allocator() const -> allocator_type
{
756 return allocator_type
{m_blocks
.get_allocator()};
759 template <class... Args
>
760 auto emplace_back(Args
&&... args
) -> reference
{
761 if (m_size
== capacity()) {
764 auto* ptr
= static_cast<void*>(&operator[](m_size
));
765 auto& ref
= *new (ptr
) T(std::forward
<Args
>(args
)...);
771 if constexpr (!std::is_trivially_destructible_v
<T
>) {
772 for (size_t i
= 0, s
= size(); i
< s
; ++i
) {
779 void shrink_to_fit() {
780 auto ba
= Allocator(m_blocks
.get_allocator());
781 auto num_blocks_required
= calc_num_blocks_for_capacity(m_size
);
782 while (m_blocks
.size() > num_blocks_required
) {
783 std::allocator_traits
<Allocator
>::deallocate(ba
, m_blocks
.back(), num_elements_in_block
);
786 m_blocks
.shrink_to_fit();
792 // This is it, the table. Doubles as map and set, and uses `void` for T when its used as a set.
794 class T
, // when void, treat it as a set.
797 class AllocatorOrContainer
,
800 class table
: public std::conditional_t
<is_map_v
<T
>, base_table_type_map
<T
>, base_table_type_set
> {
801 using underlying_value_type
= typename
std::conditional_t
<is_map_v
<T
>, std::pair
<Key
, T
>, Key
>;
802 using underlying_container_type
= std::conditional_t
<IsSegmented
,
803 segmented_vector
<underlying_value_type
, AllocatorOrContainer
>,
804 std::vector
<underlying_value_type
, AllocatorOrContainer
>>;
807 using value_container_type
= std::
808 conditional_t
<is_detected_v
<detect_iterator
, AllocatorOrContainer
>, AllocatorOrContainer
, underlying_container_type
>;
812 typename
std::allocator_traits
<typename
value_container_type::allocator_type
>::template rebind_alloc
<Bucket
>;
813 using bucket_alloc_traits
= std::allocator_traits
<bucket_alloc
>;
815 static constexpr uint8_t initial_shifts
= 64 - 2; // 2^(64-m_shift) number of buckets
816 static constexpr float default_max_load_factor
= 0.8F
;
819 using key_type
= Key
;
820 using value_type
= typename
value_container_type::value_type
;
821 using size_type
= typename
value_container_type::size_type
;
822 using difference_type
= typename
value_container_type::difference_type
;
824 using key_equal
= KeyEqual
;
825 using allocator_type
= typename
value_container_type::allocator_type
;
826 using reference
= typename
value_container_type::reference
;
827 using const_reference
= typename
value_container_type::const_reference
;
828 using pointer
= typename
value_container_type::pointer
;
829 using const_pointer
= typename
value_container_type::const_pointer
;
830 using const_iterator
= typename
value_container_type::const_iterator
;
831 using iterator
= std::conditional_t
<is_map_v
<T
>, typename
value_container_type::iterator
, const_iterator
>;
832 using bucket_type
= Bucket
;
835 using value_idx_type
= decltype(Bucket::m_value_idx
);
836 using dist_and_fingerprint_type
= decltype(Bucket::m_dist_and_fingerprint
);
838 static_assert(std::is_trivially_destructible_v
<Bucket
>, "assert there's no need to call destructor / std::destroy");
839 static_assert(std::is_trivially_copyable_v
<Bucket
>, "assert we can just memset / memcpy");
841 value_container_type m_values
{}; // Contains all the key-value pairs in one densely stored container. No holes.
842 using bucket_pointer
= typename
std::allocator_traits
<bucket_alloc
>::pointer
;
843 bucket_pointer m_buckets
{};
844 size_t m_num_buckets
= 0;
845 size_t m_max_bucket_capacity
= 0;
846 float m_max_load_factor
= default_max_load_factor
;
849 uint8_t m_shifts
= initial_shifts
;
851 [[nodiscard
]] auto next(value_idx_type bucket_idx
) const -> value_idx_type
{
852 return ANKERL_UNORDERED_DENSE_UNLIKELY(bucket_idx
+ 1U == m_num_buckets
)
854 : static_cast<value_idx_type
>(bucket_idx
+ 1U);
857 // Helper to access bucket through pointer types
858 [[nodiscard
]] static constexpr auto at(bucket_pointer bucket_ptr
, size_t offset
) -> Bucket
& {
859 return *(bucket_ptr
+ static_cast<typename
std::allocator_traits
<bucket_alloc
>::difference_type
>(offset
));
862 // use the dist_inc and dist_dec functions so that uint16_t types work without warning
863 [[nodiscard
]] static constexpr auto dist_inc(dist_and_fingerprint_type x
) -> dist_and_fingerprint_type
{
864 return static_cast<dist_and_fingerprint_type
>(x
+ Bucket::dist_inc
);
867 [[nodiscard
]] static constexpr auto dist_dec(dist_and_fingerprint_type x
) -> dist_and_fingerprint_type
{
868 return static_cast<dist_and_fingerprint_type
>(x
- Bucket::dist_inc
);
871 // The goal of mixed_hash is to always produce a high quality 64bit hash.
872 template <typename K
>
873 [[nodiscard
]] constexpr auto mixed_hash(K
const& key
) const -> uint64_t {
874 if constexpr (is_detected_v
<detect_avalanching
, Hash
>) {
875 // we know that the hash is good because is_avalanching.
876 if constexpr (sizeof(decltype(m_hash(key
))) < sizeof(uint64_t)) {
877 // 32bit hash and is_avalanching => multiply with a constant to avalanche bits upwards
878 return m_hash(key
) * UINT64_C(0x9ddfea08eb382d69);
880 // 64bit and is_avalanching => only use the hash itself.
884 // not is_avalanching => apply wyhash
885 return wyhash::hash(m_hash(key
));
889 [[nodiscard
]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash
) const -> dist_and_fingerprint_type
{
890 return Bucket::dist_inc
| (static_cast<dist_and_fingerprint_type
>(hash
) & Bucket::fingerprint_mask
);
893 [[nodiscard
]] constexpr auto bucket_idx_from_hash(uint64_t hash
) const -> value_idx_type
{
894 return static_cast<value_idx_type
>(hash
>> m_shifts
);
897 [[nodiscard
]] static constexpr auto get_key(value_type
const& vt
) -> key_type
const& {
898 if constexpr (is_map_v
<T
>) {
905 template <typename K
>
906 [[nodiscard
]] auto next_while_less(K
const& key
) const -> Bucket
{
907 auto hash
= mixed_hash(key
);
908 auto dist_and_fingerprint
= dist_and_fingerprint_from_hash(hash
);
909 auto bucket_idx
= bucket_idx_from_hash(hash
);
911 while (dist_and_fingerprint
< at(m_buckets
, bucket_idx
).m_dist_and_fingerprint
) {
912 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
913 bucket_idx
= next(bucket_idx
);
915 return {dist_and_fingerprint
, bucket_idx
};
918 void place_and_shift_up(Bucket bucket
, value_idx_type place
) {
919 while (0 != at(m_buckets
, place
).m_dist_and_fingerprint
) {
920 bucket
= std::exchange(at(m_buckets
, place
), bucket
);
921 bucket
.m_dist_and_fingerprint
= dist_inc(bucket
.m_dist_and_fingerprint
);
924 at(m_buckets
, place
) = bucket
;
927 [[nodiscard
]] static constexpr auto calc_num_buckets(uint8_t shifts
) -> size_t {
928 return (std::min
)(max_bucket_count(), size_t{1} << (64U - shifts
));
931 [[nodiscard
]] constexpr auto calc_shifts_for_size(size_t s
) const -> uint8_t {
932 auto shifts
= initial_shifts
;
933 while (shifts
> 0 && static_cast<size_t>(static_cast<float>(calc_num_buckets(shifts
)) * max_load_factor()) < s
) {
939 // assumes m_values has data, m_buckets=m_buckets_end=nullptr, m_shifts is INITIAL_SHIFTS
940 void copy_buckets(table
const& other
) {
941 // assumes m_values has already the correct data copied over.
943 // when empty, at least allocate an initial buckets and clear them.
944 allocate_buckets_from_shift();
947 m_shifts
= other
.m_shifts
;
948 allocate_buckets_from_shift();
949 std::memcpy(m_buckets
, other
.m_buckets
, sizeof(Bucket
) * bucket_count());
954 * True when no element can be added any more without increasing the size
956 [[nodiscard
]] auto is_full() const -> bool {
957 return size() > m_max_bucket_capacity
;
960 void deallocate_buckets() {
961 auto ba
= bucket_alloc(m_values
.get_allocator());
962 if (nullptr != m_buckets
) {
963 bucket_alloc_traits::deallocate(ba
, m_buckets
, bucket_count());
967 m_max_bucket_capacity
= 0;
970 void allocate_buckets_from_shift() {
971 auto ba
= bucket_alloc(m_values
.get_allocator());
972 m_num_buckets
= calc_num_buckets(m_shifts
);
973 m_buckets
= bucket_alloc_traits::allocate(ba
, m_num_buckets
);
974 if (m_num_buckets
== max_bucket_count()) {
975 // reached the maximum, make sure we can use each bucket
976 m_max_bucket_capacity
= max_bucket_count();
978 m_max_bucket_capacity
= static_cast<value_idx_type
>(static_cast<float>(m_num_buckets
) * max_load_factor());
982 void clear_buckets() {
983 if (m_buckets
!= nullptr) {
984 std::memset(&*m_buckets
, 0, sizeof(Bucket
) * bucket_count());
988 void clear_and_fill_buckets_from_values() {
990 for (value_idx_type value_idx
= 0, end_idx
= static_cast<value_idx_type
>(m_values
.size()); value_idx
< end_idx
;
992 auto const& key
= get_key(m_values
[value_idx
]);
993 auto [dist_and_fingerprint
, bucket
] = next_while_less(key
);
995 // we know for certain that key has not yet been inserted, so no need to check it.
996 place_and_shift_up({dist_and_fingerprint
, value_idx
}, bucket
);
1000 void increase_size() {
1001 if (m_max_bucket_capacity
== max_bucket_count()) {
1002 // remove the value again, we can't add it!
1003 m_values
.pop_back();
1004 on_error_bucket_overflow();
1007 deallocate_buckets();
1008 allocate_buckets_from_shift();
1009 clear_and_fill_buckets_from_values();
1012 template <typename Op
>
1013 void do_erase(value_idx_type bucket_idx
, Op handle_erased_value
) {
1014 auto const value_idx_to_remove
= at(m_buckets
, bucket_idx
).m_value_idx
;
1016 // shift down until either empty or an element with correct spot is found
1017 auto next_bucket_idx
= next(bucket_idx
);
1018 while (at(m_buckets
, next_bucket_idx
).m_dist_and_fingerprint
>= Bucket::dist_inc
* 2) {
1019 at(m_buckets
, bucket_idx
) = {dist_dec(at(m_buckets
, next_bucket_idx
).m_dist_and_fingerprint
),
1020 at(m_buckets
, next_bucket_idx
).m_value_idx
};
1021 bucket_idx
= std::exchange(next_bucket_idx
, next(next_bucket_idx
));
1023 at(m_buckets
, bucket_idx
) = {};
1024 handle_erased_value(std::move(m_values
[value_idx_to_remove
]));
1027 if (value_idx_to_remove
!= m_values
.size() - 1) {
1028 // no luck, we'll have to replace the value with the last one and update the index accordingly
1029 auto& val
= m_values
[value_idx_to_remove
];
1030 val
= std::move(m_values
.back());
1032 // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx
1033 auto mh
= mixed_hash(get_key(val
));
1034 bucket_idx
= bucket_idx_from_hash(mh
);
1036 auto const values_idx_back
= static_cast<value_idx_type
>(m_values
.size() - 1);
1037 while (values_idx_back
!= at(m_buckets
, bucket_idx
).m_value_idx
) {
1038 bucket_idx
= next(bucket_idx
);
1040 at(m_buckets
, bucket_idx
).m_value_idx
= value_idx_to_remove
;
1042 m_values
.pop_back();
1045 template <typename K
, typename Op
>
1046 auto do_erase_key(K
&& key
, Op handle_erased_value
) -> size_t {
1051 auto [dist_and_fingerprint
, bucket_idx
] = next_while_less(key
);
1053 while (dist_and_fingerprint
== at(m_buckets
, bucket_idx
).m_dist_and_fingerprint
&&
1054 !m_equal(key
, get_key(m_values
[at(m_buckets
, bucket_idx
).m_value_idx
]))) {
1055 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
1056 bucket_idx
= next(bucket_idx
);
1059 if (dist_and_fingerprint
!= at(m_buckets
, bucket_idx
).m_dist_and_fingerprint
) {
1062 do_erase(bucket_idx
, handle_erased_value
);
1066 template <class K
, class M
>
1067 auto do_insert_or_assign(K
&& key
, M
&& mapped
) -> std::pair
<iterator
, bool> {
1068 auto it_isinserted
= try_emplace(std::forward
<K
>(key
), std::forward
<M
>(mapped
));
1069 if (!it_isinserted
.second
) {
1070 it_isinserted
.first
->second
= std::forward
<M
>(mapped
);
1072 return it_isinserted
;
1075 template <typename
... Args
>
1076 auto do_place_element(dist_and_fingerprint_type dist_and_fingerprint
, value_idx_type bucket_idx
, Args
&&... args
)
1077 -> std::pair
<iterator
, bool> {
1079 // emplace the new value. If that throws an exception, no harm done; index is still in a valid state
1080 m_values
.emplace_back(std::forward
<Args
>(args
)...);
1082 auto value_idx
= static_cast<value_idx_type
>(m_values
.size() - 1);
1083 if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
1086 place_and_shift_up({dist_and_fingerprint
, value_idx
}, bucket_idx
);
1089 // place element and shift up until we find an empty spot
1090 return {begin() + static_cast<difference_type
>(value_idx
), true};
1093 template <typename K
, typename
... Args
>
1094 auto do_try_emplace(K
&& key
, Args
&&... args
) -> std::pair
<iterator
, bool> {
1095 auto hash
= mixed_hash(key
);
1096 auto dist_and_fingerprint
= dist_and_fingerprint_from_hash(hash
);
1097 auto bucket_idx
= bucket_idx_from_hash(hash
);
1100 auto* bucket
= &at(m_buckets
, bucket_idx
);
1101 if (dist_and_fingerprint
== bucket
->m_dist_and_fingerprint
) {
1102 if (m_equal(key
, get_key(m_values
[bucket
->m_value_idx
]))) {
1103 return {begin() + static_cast<difference_type
>(bucket
->m_value_idx
), false};
1105 } else if (dist_and_fingerprint
> bucket
->m_dist_and_fingerprint
) {
1106 return do_place_element(dist_and_fingerprint
,
1108 std::piecewise_construct
,
1109 std::forward_as_tuple(std::forward
<K
>(key
)),
1110 std::forward_as_tuple(std::forward
<Args
>(args
)...));
1112 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
1113 bucket_idx
= next(bucket_idx
);
1117 template <typename K
>
1118 auto do_find(K
const& key
) -> iterator
{
1119 if (ANKERL_UNORDERED_DENSE_UNLIKELY(empty())) {
1123 auto mh
= mixed_hash(key
);
1124 auto dist_and_fingerprint
= dist_and_fingerprint_from_hash(mh
);
1125 auto bucket_idx
= bucket_idx_from_hash(mh
);
1126 auto* bucket
= &at(m_buckets
, bucket_idx
);
1128 // unrolled loop. *Always* check a few directly, then enter the loop. This is faster.
1129 if (dist_and_fingerprint
== bucket
->m_dist_and_fingerprint
&& m_equal(key
, get_key(m_values
[bucket
->m_value_idx
]))) {
1130 return begin() + static_cast<difference_type
>(bucket
->m_value_idx
);
1132 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
1133 bucket_idx
= next(bucket_idx
);
1134 bucket
= &at(m_buckets
, bucket_idx
);
1136 if (dist_and_fingerprint
== bucket
->m_dist_and_fingerprint
&& m_equal(key
, get_key(m_values
[bucket
->m_value_idx
]))) {
1137 return begin() + static_cast<difference_type
>(bucket
->m_value_idx
);
1139 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
1140 bucket_idx
= next(bucket_idx
);
1141 bucket
= &at(m_buckets
, bucket_idx
);
1144 if (dist_and_fingerprint
== bucket
->m_dist_and_fingerprint
) {
1145 if (m_equal(key
, get_key(m_values
[bucket
->m_value_idx
]))) {
1146 return begin() + static_cast<difference_type
>(bucket
->m_value_idx
);
1148 } else if (dist_and_fingerprint
> bucket
->m_dist_and_fingerprint
) {
1151 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
1152 bucket_idx
= next(bucket_idx
);
1153 bucket
= &at(m_buckets
, bucket_idx
);
1157 template <typename K
>
1158 auto do_find(K
const& key
) const -> const_iterator
{
1159 return const_cast<table
*>(this)->do_find(key
); // NOLINT(cppcoreguidelines-pro-type-const-cast)
1162 template <typename K
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1163 auto do_at(K
const& key
) -> Q
& {
1164 if (auto it
= find(key
); ANKERL_UNORDERED_DENSE_LIKELY(end() != it
)) {
1167 on_error_key_not_found();
1170 template <typename K
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1171 auto do_at(K
const& key
) const -> Q
const& {
1172 return const_cast<table
*>(this)->at(key
); // NOLINT(cppcoreguidelines-pro-type-const-cast)
1176 explicit table(size_t bucket_count
,
1177 Hash
const& hash
= Hash(),
1178 KeyEqual
const& equal
= KeyEqual(),
1179 allocator_type
const& alloc_or_container
= allocator_type())
1180 : m_values(alloc_or_container
)
1183 if (0 != bucket_count
) {
1184 reserve(bucket_count
);
1186 allocate_buckets_from_shift();
1194 table(size_t bucket_count
, allocator_type
const& alloc
)
1195 : table(bucket_count
, Hash(), KeyEqual(), alloc
) {}
1197 table(size_t bucket_count
, Hash
const& hash
, allocator_type
const& alloc
)
1198 : table(bucket_count
, hash
, KeyEqual(), alloc
) {}
1200 explicit table(allocator_type
const& alloc
)
1201 : table(0, Hash(), KeyEqual(), alloc
) {}
1203 template <class InputIt
>
1204 table(InputIt first
,
1206 size_type bucket_count
= 0,
1207 Hash
const& hash
= Hash(),
1208 KeyEqual
const& equal
= KeyEqual(),
1209 allocator_type
const& alloc
= allocator_type())
1210 : table(bucket_count
, hash
, equal
, alloc
) {
1211 insert(first
, last
);
1214 template <class InputIt
>
1215 table(InputIt first
, InputIt last
, size_type bucket_count
, allocator_type
const& alloc
)
1216 : table(first
, last
, bucket_count
, Hash(), KeyEqual(), alloc
) {}
1218 template <class InputIt
>
1219 table(InputIt first
, InputIt last
, size_type bucket_count
, Hash
const& hash
, allocator_type
const& alloc
)
1220 : table(first
, last
, bucket_count
, hash
, KeyEqual(), alloc
) {}
1222 table(table
const& other
)
1223 : table(other
, other
.m_values
.get_allocator()) {}
1225 table(table
const& other
, allocator_type
const& alloc
)
1226 : m_values(other
.m_values
, alloc
)
1227 , m_max_load_factor(other
.m_max_load_factor
)
1228 , m_hash(other
.m_hash
)
1229 , m_equal(other
.m_equal
) {
1230 copy_buckets(other
);
1233 table(table
&& other
) noexcept
1234 : table(std::move(other
), other
.m_values
.get_allocator()) {}
1236 table(table
&& other
, allocator_type
const& alloc
) noexcept
1238 *this = std::move(other
);
1241 table(std::initializer_list
<value_type
> ilist
,
1242 size_t bucket_count
= 0,
1243 Hash
const& hash
= Hash(),
1244 KeyEqual
const& equal
= KeyEqual(),
1245 allocator_type
const& alloc
= allocator_type())
1246 : table(bucket_count
, hash
, equal
, alloc
) {
1250 table(std::initializer_list
<value_type
> ilist
, size_type bucket_count
, allocator_type
const& alloc
)
1251 : table(ilist
, bucket_count
, Hash(), KeyEqual(), alloc
) {}
1253 table(std::initializer_list
<value_type
> init
, size_type bucket_count
, Hash
const& hash
, allocator_type
const& alloc
)
1254 : table(init
, bucket_count
, hash
, KeyEqual(), alloc
) {}
1257 if (nullptr != m_buckets
) {
1258 auto ba
= bucket_alloc(m_values
.get_allocator());
1259 bucket_alloc_traits::deallocate(ba
, m_buckets
, bucket_count());
1263 auto operator=(table
const& other
) -> table
& {
1264 if (&other
!= this) {
1265 deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
1266 m_values
= other
.m_values
;
1267 m_max_load_factor
= other
.m_max_load_factor
;
1268 m_hash
= other
.m_hash
;
1269 m_equal
= other
.m_equal
;
1270 m_shifts
= initial_shifts
;
1271 copy_buckets(other
);
1276 auto operator=(table
&& other
) noexcept(noexcept(std::is_nothrow_move_assignable_v
<value_container_type
> &&
1277 std::is_nothrow_move_assignable_v
<Hash
> &&
1278 std::is_nothrow_move_assignable_v
<KeyEqual
>)) -> table
& {
1279 if (&other
!= this) {
1280 deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
1281 m_values
= std::move(other
.m_values
);
1282 other
.m_values
.clear();
1284 // we can only reuse m_buckets when both maps have the same allocator!
1285 if (get_allocator() == other
.get_allocator()) {
1286 m_buckets
= std::exchange(other
.m_buckets
, nullptr);
1287 m_num_buckets
= std::exchange(other
.m_num_buckets
, 0);
1288 m_max_bucket_capacity
= std::exchange(other
.m_max_bucket_capacity
, 0);
1289 m_shifts
= std::exchange(other
.m_shifts
, initial_shifts
);
1290 m_max_load_factor
= std::exchange(other
.m_max_load_factor
, default_max_load_factor
);
1291 m_hash
= std::exchange(other
.m_hash
, {});
1292 m_equal
= std::exchange(other
.m_equal
, {});
1293 other
.allocate_buckets_from_shift();
1294 other
.clear_buckets();
1296 // set max_load_factor *before* copying the other's buckets, so we have the same
1298 m_max_load_factor
= other
.m_max_load_factor
;
1300 // copy_buckets sets m_buckets, m_num_buckets, m_max_bucket_capacity, m_shifts
1301 copy_buckets(other
);
1302 // clear's the other's buckets so other is now already usable.
1303 other
.clear_buckets();
1304 m_hash
= other
.m_hash
;
1305 m_equal
= other
.m_equal
;
1307 // map "other" is now already usable, it's empty.
1312 auto operator=(std::initializer_list
<value_type
> ilist
) -> table
& {
1318 auto get_allocator() const noexcept
-> allocator_type
{
1319 return m_values
.get_allocator();
1322 // iterators //////////////////////////////////////////////////////////////
1324 auto begin() noexcept
-> iterator
{
1325 return m_values
.begin();
1328 auto begin() const noexcept
-> const_iterator
{
1329 return m_values
.begin();
1332 auto cbegin() const noexcept
-> const_iterator
{
1333 return m_values
.cbegin();
1336 auto end() noexcept
-> iterator
{
1337 return m_values
.end();
1340 auto cend() const noexcept
-> const_iterator
{
1341 return m_values
.cend();
1344 auto end() const noexcept
-> const_iterator
{
1345 return m_values
.end();
1348 // capacity ///////////////////////////////////////////////////////////////
1350 [[nodiscard
]] auto empty() const noexcept
-> bool {
1351 return m_values
.empty();
1354 [[nodiscard
]] auto size() const noexcept
-> size_t {
1355 return m_values
.size();
1358 [[nodiscard
]] static constexpr auto max_size() noexcept
-> size_t {
1359 if constexpr ((std::numeric_limits
<value_idx_type
>::max
)() == (std::numeric_limits
<size_t>::max
)()) {
1360 return size_t{1} << (sizeof(value_idx_type
) * 8 - 1);
1362 return size_t{1} << (sizeof(value_idx_type
) * 8);
1366 // modifiers //////////////////////////////////////////////////////////////
1373 auto insert(value_type
const& value
) -> std::pair
<iterator
, bool> {
1374 return emplace(value
);
1377 auto insert(value_type
&& value
) -> std::pair
<iterator
, bool> {
1378 return emplace(std::move(value
));
1381 template <class P
, std::enable_if_t
<std::is_constructible_v
<value_type
, P
&&>, bool> = true>
1382 auto insert(P
&& value
) -> std::pair
<iterator
, bool> {
1383 return emplace(std::forward
<P
>(value
));
1386 auto insert(const_iterator
/*hint*/, value_type
const& value
) -> iterator
{
1387 return insert(value
).first
;
1390 auto insert(const_iterator
/*hint*/, value_type
&& value
) -> iterator
{
1391 return insert(std::move(value
)).first
;
1394 template <class P
, std::enable_if_t
<std::is_constructible_v
<value_type
, P
&&>, bool> = true>
1395 auto insert(const_iterator
/*hint*/, P
&& value
) -> iterator
{
1396 return insert(std::forward
<P
>(value
)).first
;
// Insert every element of [first, last).
template <class InputIt>
void insert(InputIt first, InputIt last) {
    while (first != last) {
        insert(*first);
        ++first;
    }
}
1407 void insert(std::initializer_list
<value_type
> ilist
) {
1408 insert(ilist
.begin(), ilist
.end());
1411 // nonstandard API: *this is emptied.
1412 // Also see "A Standard flat_map" https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p0429r9.pdf
1413 auto extract() && -> value_container_type
{
1414 return std::move(m_values
);
1418 // Discards the internally held container and replaces it with the one passed. Erases non-unique elements.
1419 auto replace(value_container_type
&& container
) {
1420 if (ANKERL_UNORDERED_DENSE_UNLIKELY(container
.size() > max_size())) {
1421 on_error_too_many_elements();
1423 auto shifts
= calc_shifts_for_size(container
.size());
1424 if (0 == m_num_buckets
|| shifts
< m_shifts
|| container
.get_allocator() != m_values
.get_allocator()) {
1426 deallocate_buckets();
1427 allocate_buckets_from_shift();
1431 m_values
= std::move(container
);
1433 // can't use clear_and_fill_buckets_from_values() because container elements might not be unique
1434 auto value_idx
= value_idx_type
{};
1436 // loop until we reach the end of the container. duplicated entries will be replaced with back().
1437 while (value_idx
!= static_cast<value_idx_type
>(m_values
.size())) {
1438 auto const& key
= get_key(m_values
[value_idx
]);
1440 auto hash
= mixed_hash(key
);
1441 auto dist_and_fingerprint
= dist_and_fingerprint_from_hash(hash
);
1442 auto bucket_idx
= bucket_idx_from_hash(hash
);
1444 bool key_found
= false;
1446 auto const& bucket
= at(m_buckets
, bucket_idx
);
1447 if (dist_and_fingerprint
> bucket
.m_dist_and_fingerprint
) {
1450 if (dist_and_fingerprint
== bucket
.m_dist_and_fingerprint
&&
1451 m_equal(key
, get_key(m_values
[bucket
.m_value_idx
]))) {
1455 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
1456 bucket_idx
= next(bucket_idx
);
1460 if (value_idx
!= static_cast<value_idx_type
>(m_values
.size() - 1)) {
1461 m_values
[value_idx
] = std::move(m_values
.back());
1463 m_values
.pop_back();
1465 place_and_shift_up({dist_and_fingerprint
, value_idx
}, bucket_idx
);
1471 template <class M
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1472 auto insert_or_assign(Key
const& key
, M
&& mapped
) -> std::pair
<iterator
, bool> {
1473 return do_insert_or_assign(key
, std::forward
<M
>(mapped
));
1476 template <class M
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1477 auto insert_or_assign(Key
&& key
, M
&& mapped
) -> std::pair
<iterator
, bool> {
1478 return do_insert_or_assign(std::move(key
), std::forward
<M
>(mapped
));
1481 template <typename K
,
1485 typename KE
= KeyEqual
,
1486 std::enable_if_t
<is_map_v
<Q
> && is_transparent_v
<H
, KE
>, bool> = true>
1487 auto insert_or_assign(K
&& key
, M
&& mapped
) -> std::pair
<iterator
, bool> {
1488 return do_insert_or_assign(std::forward
<K
>(key
), std::forward
<M
>(mapped
));
1491 template <class M
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1492 auto insert_or_assign(const_iterator
/*hint*/, Key
const& key
, M
&& mapped
) -> iterator
{
1493 return do_insert_or_assign(key
, std::forward
<M
>(mapped
)).first
;
1496 template <class M
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1497 auto insert_or_assign(const_iterator
/*hint*/, Key
&& key
, M
&& mapped
) -> iterator
{
1498 return do_insert_or_assign(std::move(key
), std::forward
<M
>(mapped
)).first
;
1501 template <typename K
,
1505 typename KE
= KeyEqual
,
1506 std::enable_if_t
<is_map_v
<Q
> && is_transparent_v
<H
, KE
>, bool> = true>
1507 auto insert_or_assign(const_iterator
/*hint*/, K
&& key
, M
&& mapped
) -> iterator
{
1508 return do_insert_or_assign(std::forward
<K
>(key
), std::forward
<M
>(mapped
)).first
;
1511 // Single arguments for unordered_set can be used without having to construct the value_type
1515 typename KE
= KeyEqual
,
1516 std::enable_if_t
<!is_map_v
<Q
> && is_transparent_v
<H
, KE
>, bool> = true>
1517 auto emplace(K
&& key
) -> std::pair
<iterator
, bool> {
1518 auto hash
= mixed_hash(key
);
1519 auto dist_and_fingerprint
= dist_and_fingerprint_from_hash(hash
);
1520 auto bucket_idx
= bucket_idx_from_hash(hash
);
1522 while (dist_and_fingerprint
<= at(m_buckets
, bucket_idx
).m_dist_and_fingerprint
) {
1523 if (dist_and_fingerprint
== at(m_buckets
, bucket_idx
).m_dist_and_fingerprint
&&
1524 m_equal(key
, m_values
[at(m_buckets
, bucket_idx
).m_value_idx
])) {
1525 // found it, return without ever actually creating anything
1526 return {begin() + static_cast<difference_type
>(at(m_buckets
, bucket_idx
).m_value_idx
), false};
1528 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
1529 bucket_idx
= next(bucket_idx
);
1532 // value is new, insert element first, so when exception happens we are in a valid state
1533 return do_place_element(dist_and_fingerprint
, bucket_idx
, std::forward
<K
>(key
));
1536 template <class... Args
>
1537 auto emplace(Args
&&... args
) -> std::pair
<iterator
, bool> {
1538 // we have to instantiate the value_type to be able to access the key.
1539 // 1. emplace_back the object so it is constructed. 2. If the key is already there, pop it later in the loop.
1540 auto& key
= get_key(m_values
.emplace_back(std::forward
<Args
>(args
)...));
1541 auto hash
= mixed_hash(key
);
1542 auto dist_and_fingerprint
= dist_and_fingerprint_from_hash(hash
);
1543 auto bucket_idx
= bucket_idx_from_hash(hash
);
1545 while (dist_and_fingerprint
<= at(m_buckets
, bucket_idx
).m_dist_and_fingerprint
) {
1546 if (dist_and_fingerprint
== at(m_buckets
, bucket_idx
).m_dist_and_fingerprint
&&
1547 m_equal(key
, get_key(m_values
[at(m_buckets
, bucket_idx
).m_value_idx
]))) {
1548 m_values
.pop_back(); // value was already there, so get rid of it
1549 return {begin() + static_cast<difference_type
>(at(m_buckets
, bucket_idx
).m_value_idx
), false};
1551 dist_and_fingerprint
= dist_inc(dist_and_fingerprint
);
1552 bucket_idx
= next(bucket_idx
);
1555 // value is new, place the bucket and shift up until we find an empty spot
1556 auto value_idx
= static_cast<value_idx_type
>(m_values
.size() - 1);
1557 if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
1558 // increase_size just rehashes all the data we have in m_values
1561 // place element and shift up until we find an empty spot
1562 place_and_shift_up({dist_and_fingerprint
, value_idx
}, bucket_idx
);
1564 return {begin() + static_cast<difference_type
>(value_idx
), true};
1567 template <class... Args
>
1568 auto emplace_hint(const_iterator
/*hint*/, Args
&&... args
) -> iterator
{
1569 return emplace(std::forward
<Args
>(args
)...).first
;
1572 template <class... Args
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1573 auto try_emplace(Key
const& key
, Args
&&... args
) -> std::pair
<iterator
, bool> {
1574 return do_try_emplace(key
, std::forward
<Args
>(args
)...);
1577 template <class... Args
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1578 auto try_emplace(Key
&& key
, Args
&&... args
) -> std::pair
<iterator
, bool> {
1579 return do_try_emplace(std::move(key
), std::forward
<Args
>(args
)...);
1582 template <class... Args
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1583 auto try_emplace(const_iterator
/*hint*/, Key
const& key
, Args
&&... args
) -> iterator
{
1584 return do_try_emplace(key
, std::forward
<Args
>(args
)...).first
;
1587 template <class... Args
, typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1588 auto try_emplace(const_iterator
/*hint*/, Key
&& key
, Args
&&... args
) -> iterator
{
1589 return do_try_emplace(std::move(key
), std::forward
<Args
>(args
)...).first
;
1597 typename KE
= KeyEqual
,
1598 std::enable_if_t
<is_map_v
<Q
> && is_transparent_v
<H
, KE
> && is_neither_convertible_v
<K
&&, iterator
, const_iterator
>,
1600 auto try_emplace(K
&& key
, Args
&&... args
) -> std::pair
<iterator
, bool> {
1601 return do_try_emplace(std::forward
<K
>(key
), std::forward
<Args
>(args
)...);
1609 typename KE
= KeyEqual
,
1610 std::enable_if_t
<is_map_v
<Q
> && is_transparent_v
<H
, KE
> && is_neither_convertible_v
<K
&&, iterator
, const_iterator
>,
1612 auto try_emplace(const_iterator
/*hint*/, K
&& key
, Args
&&... args
) -> iterator
{
1613 return do_try_emplace(std::forward
<K
>(key
), std::forward
<Args
>(args
)...).first
;
1616 auto erase(iterator it
) -> iterator
{
1617 auto hash
= mixed_hash(get_key(*it
));
1618 auto bucket_idx
= bucket_idx_from_hash(hash
);
1620 auto const value_idx_to_remove
= static_cast<value_idx_type
>(it
- cbegin());
1621 while (at(m_buckets
, bucket_idx
).m_value_idx
!= value_idx_to_remove
) {
1622 bucket_idx
= next(bucket_idx
);
1625 do_erase(bucket_idx
, [](value_type
&& /*unused*/) {
1627 return begin() + static_cast<difference_type
>(value_idx_to_remove
);
1630 auto extract(iterator it
) -> value_type
{
1631 auto hash
= mixed_hash(get_key(*it
));
1632 auto bucket_idx
= bucket_idx_from_hash(hash
);
1634 auto const value_idx_to_remove
= static_cast<value_idx_type
>(it
- cbegin());
1635 while (at(m_buckets
, bucket_idx
).m_value_idx
!= value_idx_to_remove
) {
1636 bucket_idx
= next(bucket_idx
);
1639 auto tmp
= std::optional
<value_type
>{};
1640 do_erase(bucket_idx
, [&tmp
](value_type
&& val
) {
1641 tmp
= std::move(val
);
1643 return std::move(tmp
).value();
1646 template <typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1647 auto erase(const_iterator it
) -> iterator
{
1648 return erase(begin() + (it
- cbegin()));
1651 template <typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1652 auto extract(const_iterator it
) -> value_type
{
1653 return extract(begin() + (it
- cbegin()));
1656 auto erase(const_iterator first
, const_iterator last
) -> iterator
{
1657 auto const idx_first
= first
- cbegin();
1658 auto const idx_last
= last
- cbegin();
1659 auto const first_to_last
= std::distance(first
, last
);
1660 auto const last_to_end
= std::distance(last
, cend());
1662 // remove elements from left to right which moves elements from the end back
1663 auto const mid
= idx_first
+ (std::min
)(first_to_last
, last_to_end
);
1664 auto idx
= idx_first
;
1665 while (idx
!= mid
) {
1666 erase(begin() + idx
);
1670 // all elements from the right are moved, now remove the last element until all done
1672 while (idx
!= mid
) {
1674 erase(begin() + idx
);
1677 return begin() + idx_first
;
1680 auto erase(Key
const& key
) -> size_t {
1681 return do_erase_key(key
, [](value_type
&& /*unused*/) {
1685 auto extract(Key
const& key
) -> std::optional
<value_type
> {
1686 auto tmp
= std::optional
<value_type
>{};
1687 do_erase_key(key
, [&tmp
](value_type
&& val
) {
1688 tmp
= std::move(val
);
1693 template <class K
, class H
= Hash
, class KE
= KeyEqual
, std::enable_if_t
<is_transparent_v
<H
, KE
>, bool> = true>
1694 auto erase(K
&& key
) -> size_t {
1695 return do_erase_key(std::forward
<K
>(key
), [](value_type
&& /*unused*/) {
1699 template <class K
, class H
= Hash
, class KE
= KeyEqual
, std::enable_if_t
<is_transparent_v
<H
, KE
>, bool> = true>
1700 auto extract(K
&& key
) -> std::optional
<value_type
> {
1701 auto tmp
= std::optional
<value_type
>{};
1702 do_erase_key(std::forward
<K
>(key
), [&tmp
](value_type
&& val
) {
1703 tmp
= std::move(val
);
1708 void swap(table
& other
) noexcept(noexcept(std::is_nothrow_swappable_v
<value_container_type
> &&
1709 std::is_nothrow_swappable_v
<Hash
> && std::is_nothrow_swappable_v
<KeyEqual
>)) {
1714 // lookup /////////////////////////////////////////////////////////////////
1716 template <typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1717 auto at(key_type
const& key
) -> Q
& {
1721 template <typename K
,
1724 typename KE
= KeyEqual
,
1725 std::enable_if_t
<is_map_v
<Q
> && is_transparent_v
<H
, KE
>, bool> = true>
1726 auto at(K
const& key
) -> Q
& {
1730 template <typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1731 auto at(key_type
const& key
) const -> Q
const& {
1735 template <typename K
,
1738 typename KE
= KeyEqual
,
1739 std::enable_if_t
<is_map_v
<Q
> && is_transparent_v
<H
, KE
>, bool> = true>
1740 auto at(K
const& key
) const -> Q
const& {
1744 template <typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1745 auto operator[](Key
const& key
) -> Q
& {
1746 return try_emplace(key
).first
->second
;
1749 template <typename Q
= T
, std::enable_if_t
<is_map_v
<Q
>, bool> = true>
1750 auto operator[](Key
&& key
) -> Q
& {
1751 return try_emplace(std::move(key
)).first
->second
;
1754 template <typename K
,
1757 typename KE
= KeyEqual
,
1758 std::enable_if_t
<is_map_v
<Q
> && is_transparent_v
<H
, KE
>, bool> = true>
1759 auto operator[](K
&& key
) -> Q
& {
1760 return try_emplace(std::forward
<K
>(key
)).first
->second
;
1763 auto count(Key
const& key
) const -> size_t {
1764 return find(key
) == end() ? 0 : 1;
1767 template <class K
, class H
= Hash
, class KE
= KeyEqual
, std::enable_if_t
<is_transparent_v
<H
, KE
>, bool> = true>
1768 auto count(K
const& key
) const -> size_t {
1769 return find(key
) == end() ? 0 : 1;
1772 auto find(Key
const& key
) -> iterator
{
1773 return do_find(key
);
1776 auto find(Key
const& key
) const -> const_iterator
{
1777 return do_find(key
);
1780 template <class K
, class H
= Hash
, class KE
= KeyEqual
, std::enable_if_t
<is_transparent_v
<H
, KE
>, bool> = true>
1781 auto find(K
const& key
) -> iterator
{
1782 return do_find(key
);
1785 template <class K
, class H
= Hash
, class KE
= KeyEqual
, std::enable_if_t
<is_transparent_v
<H
, KE
>, bool> = true>
1786 auto find(K
const& key
) const -> const_iterator
{
1787 return do_find(key
);
1790 auto contains(Key
const& key
) const -> bool {
1791 return find(key
) != end();
1794 template <class K
, class H
= Hash
, class KE
= KeyEqual
, std::enable_if_t
<is_transparent_v
<H
, KE
>, bool> = true>
1795 auto contains(K
const& key
) const -> bool {
1796 return find(key
) != end();
1799 auto equal_range(Key
const& key
) -> std::pair
<iterator
, iterator
> {
1800 auto it
= do_find(key
);
1801 return {it
, it
== end() ? end() : it
+ 1};
1804 auto equal_range(const Key
& key
) const -> std::pair
<const_iterator
, const_iterator
> {
1805 auto it
= do_find(key
);
1806 return {it
, it
== end() ? end() : it
+ 1};
1809 template <class K
, class H
= Hash
, class KE
= KeyEqual
, std::enable_if_t
<is_transparent_v
<H
, KE
>, bool> = true>
1810 auto equal_range(K
const& key
) -> std::pair
<iterator
, iterator
> {
1811 auto it
= do_find(key
);
1812 return {it
, it
== end() ? end() : it
+ 1};
1815 template <class K
, class H
= Hash
, class KE
= KeyEqual
, std::enable_if_t
<is_transparent_v
<H
, KE
>, bool> = true>
1816 auto equal_range(K
const& key
) const -> std::pair
<const_iterator
, const_iterator
> {
1817 auto it
= do_find(key
);
1818 return {it
, it
== end() ? end() : it
+ 1};
1821 // bucket interface ///////////////////////////////////////////////////////
1823 auto bucket_count() const noexcept
-> size_t { // NOLINT(modernize-use-nodiscard)
1824 return m_num_buckets
;
1827 static constexpr auto max_bucket_count() noexcept
-> size_t { // NOLINT(modernize-use-nodiscard)
1831 // hash policy ////////////////////////////////////////////////////////////
1833 [[nodiscard
]] auto load_factor() const -> float {
1834 return bucket_count() ? static_cast<float>(size()) / static_cast<float>(bucket_count()) : 0.0F
;
1837 [[nodiscard
]] auto max_load_factor() const -> float {
1838 return m_max_load_factor
;
1841 void max_load_factor(float ml
) {
1842 m_max_load_factor
= ml
;
1843 if (m_num_buckets
!= max_bucket_count()) {
1844 m_max_bucket_capacity
= static_cast<value_idx_type
>(static_cast<float>(bucket_count()) * max_load_factor());
1848 void rehash(size_t count
) {
1849 count
= (std::min
)(count
, max_size());
1850 auto shifts
= calc_shifts_for_size((std::max
)(count
, size()));
1851 if (shifts
!= m_shifts
) {
1853 deallocate_buckets();
1854 m_values
.shrink_to_fit();
1855 allocate_buckets_from_shift();
1856 clear_and_fill_buckets_from_values();
1860 void reserve(size_t capa
) {
1861 capa
= (std::min
)(capa
, max_size());
1862 if constexpr (has_reserve
<value_container_type
>) {
1863 // std::deque doesn't have reserve(). Make sure we only call when available
1864 m_values
.reserve(capa
);
1866 auto shifts
= calc_shifts_for_size((std::max
)(capa
, size()));
1867 if (0 == m_num_buckets
|| shifts
< m_shifts
) {
1869 deallocate_buckets();
1870 allocate_buckets_from_shift();
1871 clear_and_fill_buckets_from_values();
1875 // observers //////////////////////////////////////////////////////////////
1877 auto hash_function() const -> hasher
{
1881 auto key_eq() const -> key_equal
{
1885 // nonstandard API: expose the underlying values container
1886 [[nodiscard
]] auto values() const noexcept
-> value_container_type
const& {
1890 // non-member functions ///////////////////////////////////////////////////
1892 friend auto operator==(table
const& a
, table
const& b
) -> bool {
1896 if (a
.size() != b
.size()) {
1899 for (auto const& b_entry
: b
) {
1900 auto it
= a
.find(get_key(b_entry
));
1901 if constexpr (is_map_v
<T
>) {
1902 // map: check that key is here, then also check that value is the same
1903 if (a
.end() == it
|| !(b_entry
.second
== it
->second
)) {
1907 // set: only check that the key is here
1908 if (a
.end() == it
) {
1916 friend auto operator!=(table
const& a
, table
const& b
) -> bool {
1921 } // namespace detail
1923 ANKERL_UNORDERED_DENSE_EXPORT
template <class Key
,
1925 class Hash
= hash
<Key
>,
1926 class KeyEqual
= std::equal_to
<Key
>,
1927 class AllocatorOrContainer
= std::allocator
<std::pair
<Key
, T
>>,
1928 class Bucket
= bucket_type::standard
>
1929 using map
= detail::table
<Key
, T
, Hash
, KeyEqual
, AllocatorOrContainer
, Bucket
, false>;
1931 ANKERL_UNORDERED_DENSE_EXPORT
template <class Key
,
1933 class Hash
= hash
<Key
>,
1934 class KeyEqual
= std::equal_to
<Key
>,
1935 class AllocatorOrContainer
= std::allocator
<std::pair
<Key
, T
>>,
1936 class Bucket
= bucket_type::standard
>
1937 using segmented_map
= detail::table
<Key
, T
, Hash
, KeyEqual
, AllocatorOrContainer
, Bucket
, true>;
// set: a table with no mapped type (T = void), storing only keys; flat
// (non-segmented) variant, selected by the final `false` argument.
ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class AllocatorOrContainer = std::allocator<Key>,
                                        class Bucket = bucket_type::standard>
using set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, false>;
// segmented_set: same as `set`, but instantiates detail::table with the final
// (IsSegmented) argument set to true, selecting the segmented value container.
ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class AllocatorOrContainer = std::allocator<Key>,
                                        class Bucket = bucket_type::standard>
using segmented_set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, true>;
#    if defined(ANKERL_UNORDERED_DENSE_PMR)

// pmr aliases: same containers, but hard-wired to the polymorphic allocator so
// the memory resource can be chosen at runtime. Only available when the
// ANKERL_UNORDERED_DENSE_PMR macro names a pmr namespace.
namespace pmr {

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class T,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class Bucket = bucket_type::standard>
using map =
    detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<std::pair<Key, T>>, Bucket, false>;

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class T,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class Bucket = bucket_type::standard>
using segmented_map =
    detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<std::pair<Key, T>>, Bucket, true>;

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class Bucket = bucket_type::standard>
using set = detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<Key>, Bucket, false>;

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class Bucket = bucket_type::standard>
using segmented_set =
    detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<Key>, Bucket, true>;

} // namespace pmr

#    endif
1990 // deduction guides ///////////////////////////////////////////////////////////
1992 // deduction guides for alias templates are only possible since C++20
1993 // see https://en.cppreference.com/w/cpp/language/class_template_argument_deduction
1995 } // namespace ANKERL_UNORDERED_DENSE_NAMESPACE
1996 } // namespace ankerl::unordered_dense
1998 // std extensions /////////////////////////////////////////////////////////////
2000 namespace std
{ // NOLINT(cert-dcl58-cpp)
2002 ANKERL_UNORDERED_DENSE_EXPORT
template <class Key
,
2006 class AllocatorOrContainer
,
2010 // NOLINTNEXTLINE(cert-dcl58-cpp)
2011 auto erase_if(ankerl::unordered_dense::detail::table
<Key
, T
, Hash
, KeyEqual
, AllocatorOrContainer
, Bucket
, IsSegmented
>& map
,
2012 Pred pred
) -> size_t {
2013 using map_t
= ankerl::unordered_dense::detail::table
<Key
, T
, Hash
, KeyEqual
, AllocatorOrContainer
, Bucket
, IsSegmented
>;
2015 // going back to front because erase() invalidates the end iterator
2016 auto const old_size
= map
.size();
2017 auto idx
= old_size
;
2020 auto it
= map
.begin() + static_cast<typename
map_t::difference_type
>(idx
);
2026 return old_size
- map
.size();