1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines the DenseMap class.
11 //===----------------------------------------------------------------------===//
13 #ifndef LLVM_ADT_DENSEMAP_H
14 #define LLVM_ADT_DENSEMAP_H
16 #include "llvm/ADT/DenseMapInfo.h"
17 #include "llvm/ADT/EpochTracker.h"
18 #include "llvm/Support/AlignOf.h"
19 #include "llvm/Support/Compiler.h"
20 #include "llvm/Support/MathExtras.h"
21 #include "llvm/Support/ReverseIteration.h"
22 #include "llvm/Support/type_traits.h"
27 #include <initializer_list>
30 #include <type_traits>
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  // Inherit every constructor of the underlying std::pair.
  using std::pair<KeyT, ValueT>::pair;

  /// Access the key, i.e. the first element of the underlying pair.
  KeyT &getFirst() { return this->first; }
  const KeyT &getFirst() const { return this->first; }

  /// Access the mapped value, i.e. the second element of the underlying pair.
  ValueT &getSecond() { return this->second; }
  const ValueT &getSecond() const { return this->second; }
};
49 } // end namespace detail
51 template <typename KeyT
, typename ValueT
,
52 typename KeyInfoT
= DenseMapInfo
<KeyT
>,
53 typename Bucket
= llvm::detail::DenseMapPair
<KeyT
, ValueT
>,
55 class DenseMapIterator
;
57 template <typename DerivedT
, typename KeyT
, typename ValueT
, typename KeyInfoT
,
59 class DenseMapBase
: public DebugEpochBase
{
61 using const_arg_type_t
= typename const_pointer_or_const_ref
<T
>::type
;
64 using size_type
= unsigned;
65 using key_type
= KeyT
;
66 using mapped_type
= ValueT
;
67 using value_type
= BucketT
;
69 using iterator
= DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, BucketT
>;
70 using const_iterator
=
71 DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, BucketT
, true>;
73 inline iterator
begin() {
74 // When the map is empty, avoid the overhead of advancing/retreating past
78 if (shouldReverseIterate
<KeyT
>())
79 return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
80 return makeIterator(getBuckets(), getBucketsEnd(), *this);
82 inline iterator
end() {
83 return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
85 inline const_iterator
begin() const {
88 if (shouldReverseIterate
<KeyT
>())
89 return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
90 return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
92 inline const_iterator
end() const {
93 return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
96 LLVM_NODISCARD
bool empty() const {
97 return getNumEntries() == 0;
99 unsigned size() const { return getNumEntries(); }
101 /// Grow the densemap so that it can contain at least \p NumEntries items
102 /// before resizing again.
103 void reserve(size_type NumEntries
) {
104 auto NumBuckets
= getMinBucketToReserveForEntries(NumEntries
);
106 if (NumBuckets
> getNumBuckets())
112 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
114 // If the capacity of the array is huge, and the # elements used is small,
116 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
121 const KeyT EmptyKey
= getEmptyKey(), TombstoneKey
= getTombstoneKey();
122 if (is_trivially_copyable
<KeyT
>::value
&&
123 is_trivially_copyable
<ValueT
>::value
) {
124 // Use a simpler loop when these are trivial types.
125 for (BucketT
*P
= getBuckets(), *E
= getBucketsEnd(); P
!= E
; ++P
)
126 P
->getFirst() = EmptyKey
;
128 unsigned NumEntries
= getNumEntries();
129 for (BucketT
*P
= getBuckets(), *E
= getBucketsEnd(); P
!= E
; ++P
) {
130 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
)) {
131 if (!KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
)) {
132 P
->getSecond().~ValueT();
135 P
->getFirst() = EmptyKey
;
138 assert(NumEntries
== 0 && "Node count imbalance!");
144 /// Return 1 if the specified key is in the map, 0 otherwise.
145 size_type
count(const_arg_type_t
<KeyT
> Val
) const {
146 const BucketT
*TheBucket
;
147 return LookupBucketFor(Val
, TheBucket
) ? 1 : 0;
150 iterator
find(const_arg_type_t
<KeyT
> Val
) {
152 if (LookupBucketFor(Val
, TheBucket
))
153 return makeIterator(TheBucket
, getBucketsEnd(), *this, true);
156 const_iterator
find(const_arg_type_t
<KeyT
> Val
) const {
157 const BucketT
*TheBucket
;
158 if (LookupBucketFor(Val
, TheBucket
))
159 return makeConstIterator(TheBucket
, getBucketsEnd(), *this, true);
163 /// Alternate version of find() which allows a different, and possibly
164 /// less expensive, key type.
165 /// The DenseMapInfo is responsible for supplying methods
166 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
168 template<class LookupKeyT
>
169 iterator
find_as(const LookupKeyT
&Val
) {
171 if (LookupBucketFor(Val
, TheBucket
))
172 return makeIterator(TheBucket
, getBucketsEnd(), *this, true);
175 template<class LookupKeyT
>
176 const_iterator
find_as(const LookupKeyT
&Val
) const {
177 const BucketT
*TheBucket
;
178 if (LookupBucketFor(Val
, TheBucket
))
179 return makeConstIterator(TheBucket
, getBucketsEnd(), *this, true);
183 /// lookup - Return the entry for the specified key, or a default
184 /// constructed value if no such entry exists.
185 ValueT
lookup(const_arg_type_t
<KeyT
> Val
) const {
186 const BucketT
*TheBucket
;
187 if (LookupBucketFor(Val
, TheBucket
))
188 return TheBucket
->getSecond();
192 // Inserts key,value pair into the map if the key isn't already in the map.
193 // If the key is already in the map, it returns false and doesn't update the
195 std::pair
<iterator
, bool> insert(const std::pair
<KeyT
, ValueT
> &KV
) {
196 return try_emplace(KV
.first
, KV
.second
);
199 // Inserts key,value pair into the map if the key isn't already in the map.
200 // If the key is already in the map, it returns false and doesn't update the
202 std::pair
<iterator
, bool> insert(std::pair
<KeyT
, ValueT
> &&KV
) {
203 return try_emplace(std::move(KV
.first
), std::move(KV
.second
));
206 // Inserts key,value pair into the map if the key isn't already in the map.
207 // The value is constructed in-place if the key is not in the map, otherwise
209 template <typename
... Ts
>
210 std::pair
<iterator
, bool> try_emplace(KeyT
&&Key
, Ts
&&... Args
) {
212 if (LookupBucketFor(Key
, TheBucket
))
213 return std::make_pair(
214 makeIterator(TheBucket
, getBucketsEnd(), *this, true),
215 false); // Already in map.
217 // Otherwise, insert the new element.
219 InsertIntoBucket(TheBucket
, std::move(Key
), std::forward
<Ts
>(Args
)...);
220 return std::make_pair(
221 makeIterator(TheBucket
, getBucketsEnd(), *this, true),
225 // Inserts key,value pair into the map if the key isn't already in the map.
226 // The value is constructed in-place if the key is not in the map, otherwise
228 template <typename
... Ts
>
229 std::pair
<iterator
, bool> try_emplace(const KeyT
&Key
, Ts
&&... Args
) {
231 if (LookupBucketFor(Key
, TheBucket
))
232 return std::make_pair(
233 makeIterator(TheBucket
, getBucketsEnd(), *this, true),
234 false); // Already in map.
236 // Otherwise, insert the new element.
237 TheBucket
= InsertIntoBucket(TheBucket
, Key
, std::forward
<Ts
>(Args
)...);
238 return std::make_pair(
239 makeIterator(TheBucket
, getBucketsEnd(), *this, true),
243 /// Alternate version of insert() which allows a different, and possibly
244 /// less expensive, key type.
245 /// The DenseMapInfo is responsible for supplying methods
246 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
248 template <typename LookupKeyT
>
249 std::pair
<iterator
, bool> insert_as(std::pair
<KeyT
, ValueT
> &&KV
,
250 const LookupKeyT
&Val
) {
252 if (LookupBucketFor(Val
, TheBucket
))
253 return std::make_pair(
254 makeIterator(TheBucket
, getBucketsEnd(), *this, true),
255 false); // Already in map.
257 // Otherwise, insert the new element.
258 TheBucket
= InsertIntoBucketWithLookup(TheBucket
, std::move(KV
.first
),
259 std::move(KV
.second
), Val
);
260 return std::make_pair(
261 makeIterator(TheBucket
, getBucketsEnd(), *this, true),
265 /// insert - Range insertion of pairs.
266 template<typename InputIt
>
267 void insert(InputIt I
, InputIt E
) {
272 bool erase(const KeyT
&Val
) {
274 if (!LookupBucketFor(Val
, TheBucket
))
275 return false; // not in map.
277 TheBucket
->getSecond().~ValueT();
278 TheBucket
->getFirst() = getTombstoneKey();
279 decrementNumEntries();
280 incrementNumTombstones();
283 void erase(iterator I
) {
284 BucketT
*TheBucket
= &*I
;
285 TheBucket
->getSecond().~ValueT();
286 TheBucket
->getFirst() = getTombstoneKey();
287 decrementNumEntries();
288 incrementNumTombstones();
291 value_type
& FindAndConstruct(const KeyT
&Key
) {
293 if (LookupBucketFor(Key
, TheBucket
))
296 return *InsertIntoBucket(TheBucket
, Key
);
299 ValueT
&operator[](const KeyT
&Key
) {
300 return FindAndConstruct(Key
).second
;
303 value_type
& FindAndConstruct(KeyT
&&Key
) {
305 if (LookupBucketFor(Key
, TheBucket
))
308 return *InsertIntoBucket(TheBucket
, std::move(Key
));
311 ValueT
&operator[](KeyT
&&Key
) {
312 return FindAndConstruct(std::move(Key
)).second
;
315 /// isPointerIntoBucketsArray - Return true if the specified pointer points
316 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
317 /// value in the DenseMap).
318 bool isPointerIntoBucketsArray(const void *Ptr
) const {
319 return Ptr
>= getBuckets() && Ptr
< getBucketsEnd();
322 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
323 /// array. In conjunction with the previous method, this can be used to
324 /// determine whether an insertion caused the DenseMap to reallocate.
325 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
328 DenseMapBase() = default;
331 if (getNumBuckets() == 0) // Nothing to do.
334 const KeyT EmptyKey
= getEmptyKey(), TombstoneKey
= getTombstoneKey();
335 for (BucketT
*P
= getBuckets(), *E
= getBucketsEnd(); P
!= E
; ++P
) {
336 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
) &&
337 !KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
))
338 P
->getSecond().~ValueT();
339 P
->getFirst().~KeyT();
347 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
348 "# initial buckets must be a power of two!");
349 const KeyT EmptyKey
= getEmptyKey();
350 for (BucketT
*B
= getBuckets(), *E
= getBucketsEnd(); B
!= E
; ++B
)
351 ::new (&B
->getFirst()) KeyT(EmptyKey
);
354 /// Returns the number of buckets to allocate to ensure that the DenseMap can
355 /// accommodate \p NumEntries without need to grow().
356 unsigned getMinBucketToReserveForEntries(unsigned NumEntries
) {
357 // Ensure that "NumEntries * 4 < NumBuckets * 3"
360 // +1 is required because of the strict equality.
361 // For example if NumEntries is 48, we need to return 401.
362 return NextPowerOf2(NumEntries
* 4 / 3 + 1);
365 void moveFromOldBuckets(BucketT
*OldBucketsBegin
, BucketT
*OldBucketsEnd
) {
368 // Insert all the old elements.
369 const KeyT EmptyKey
= getEmptyKey();
370 const KeyT TombstoneKey
= getTombstoneKey();
371 for (BucketT
*B
= OldBucketsBegin
, *E
= OldBucketsEnd
; B
!= E
; ++B
) {
372 if (!KeyInfoT::isEqual(B
->getFirst(), EmptyKey
) &&
373 !KeyInfoT::isEqual(B
->getFirst(), TombstoneKey
)) {
374 // Insert the key/value into the new table.
376 bool FoundVal
= LookupBucketFor(B
->getFirst(), DestBucket
);
377 (void)FoundVal
; // silence warning.
378 assert(!FoundVal
&& "Key already in new map?");
379 DestBucket
->getFirst() = std::move(B
->getFirst());
380 ::new (&DestBucket
->getSecond()) ValueT(std::move(B
->getSecond()));
381 incrementNumEntries();
384 B
->getSecond().~ValueT();
386 B
->getFirst().~KeyT();
390 template <typename OtherBaseT
>
392 const DenseMapBase
<OtherBaseT
, KeyT
, ValueT
, KeyInfoT
, BucketT
> &other
) {
393 assert(&other
!= this);
394 assert(getNumBuckets() == other
.getNumBuckets());
396 setNumEntries(other
.getNumEntries());
397 setNumTombstones(other
.getNumTombstones());
399 if (is_trivially_copyable
<KeyT
>::value
&&
400 is_trivially_copyable
<ValueT
>::value
)
401 memcpy(reinterpret_cast<void *>(getBuckets()), other
.getBuckets(),
402 getNumBuckets() * sizeof(BucketT
));
404 for (size_t i
= 0; i
< getNumBuckets(); ++i
) {
405 ::new (&getBuckets()[i
].getFirst())
406 KeyT(other
.getBuckets()[i
].getFirst());
407 if (!KeyInfoT::isEqual(getBuckets()[i
].getFirst(), getEmptyKey()) &&
408 !KeyInfoT::isEqual(getBuckets()[i
].getFirst(), getTombstoneKey()))
409 ::new (&getBuckets()[i
].getSecond())
410 ValueT(other
.getBuckets()[i
].getSecond());
414 static unsigned getHashValue(const KeyT
&Val
) {
415 return KeyInfoT::getHashValue(Val
);
418 template<typename LookupKeyT
>
419 static unsigned getHashValue(const LookupKeyT
&Val
) {
420 return KeyInfoT::getHashValue(Val
);
423 static const KeyT
getEmptyKey() {
424 static_assert(std::is_base_of
<DenseMapBase
, DerivedT
>::value
,
425 "Must pass the derived type to this template!");
426 return KeyInfoT::getEmptyKey();
429 static const KeyT
getTombstoneKey() {
430 return KeyInfoT::getTombstoneKey();
434 iterator
makeIterator(BucketT
*P
, BucketT
*E
,
435 DebugEpochBase
&Epoch
,
436 bool NoAdvance
=false) {
437 if (shouldReverseIterate
<KeyT
>()) {
438 BucketT
*B
= P
== getBucketsEnd() ? getBuckets() : P
+ 1;
439 return iterator(B
, E
, Epoch
, NoAdvance
);
441 return iterator(P
, E
, Epoch
, NoAdvance
);
444 const_iterator
makeConstIterator(const BucketT
*P
, const BucketT
*E
,
445 const DebugEpochBase
&Epoch
,
446 const bool NoAdvance
=false) const {
447 if (shouldReverseIterate
<KeyT
>()) {
448 const BucketT
*B
= P
== getBucketsEnd() ? getBuckets() : P
+ 1;
449 return const_iterator(B
, E
, Epoch
, NoAdvance
);
451 return const_iterator(P
, E
, Epoch
, NoAdvance
);
454 unsigned getNumEntries() const {
455 return static_cast<const DerivedT
*>(this)->getNumEntries();
458 void setNumEntries(unsigned Num
) {
459 static_cast<DerivedT
*>(this)->setNumEntries(Num
);
462 void incrementNumEntries() {
463 setNumEntries(getNumEntries() + 1);
466 void decrementNumEntries() {
467 setNumEntries(getNumEntries() - 1);
470 unsigned getNumTombstones() const {
471 return static_cast<const DerivedT
*>(this)->getNumTombstones();
474 void setNumTombstones(unsigned Num
) {
475 static_cast<DerivedT
*>(this)->setNumTombstones(Num
);
478 void incrementNumTombstones() {
479 setNumTombstones(getNumTombstones() + 1);
482 void decrementNumTombstones() {
483 setNumTombstones(getNumTombstones() - 1);
486 const BucketT
*getBuckets() const {
487 return static_cast<const DerivedT
*>(this)->getBuckets();
490 BucketT
*getBuckets() {
491 return static_cast<DerivedT
*>(this)->getBuckets();
494 unsigned getNumBuckets() const {
495 return static_cast<const DerivedT
*>(this)->getNumBuckets();
498 BucketT
*getBucketsEnd() {
499 return getBuckets() + getNumBuckets();
502 const BucketT
*getBucketsEnd() const {
503 return getBuckets() + getNumBuckets();
506 void grow(unsigned AtLeast
) {
507 static_cast<DerivedT
*>(this)->grow(AtLeast
);
510 void shrink_and_clear() {
511 static_cast<DerivedT
*>(this)->shrink_and_clear();
514 template <typename KeyArg
, typename
... ValueArgs
>
515 BucketT
*InsertIntoBucket(BucketT
*TheBucket
, KeyArg
&&Key
,
516 ValueArgs
&&... Values
) {
517 TheBucket
= InsertIntoBucketImpl(Key
, Key
, TheBucket
);
519 TheBucket
->getFirst() = std::forward
<KeyArg
>(Key
);
520 ::new (&TheBucket
->getSecond()) ValueT(std::forward
<ValueArgs
>(Values
)...);
524 template <typename LookupKeyT
>
525 BucketT
*InsertIntoBucketWithLookup(BucketT
*TheBucket
, KeyT
&&Key
,
526 ValueT
&&Value
, LookupKeyT
&Lookup
) {
527 TheBucket
= InsertIntoBucketImpl(Key
, Lookup
, TheBucket
);
529 TheBucket
->getFirst() = std::move(Key
);
530 ::new (&TheBucket
->getSecond()) ValueT(std::move(Value
));
534 template <typename LookupKeyT
>
535 BucketT
*InsertIntoBucketImpl(const KeyT
&Key
, const LookupKeyT
&Lookup
,
536 BucketT
*TheBucket
) {
539 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
540 // the buckets are empty (meaning that many are filled with tombstones),
543 // The later case is tricky. For example, if we had one empty bucket with
544 // tons of tombstones, failing lookups (e.g. for insertion) would have to
545 // probe almost the entire table until it found the empty bucket. If the
546 // table completely filled with tombstones, no lookup would ever succeed,
547 // causing infinite loops in lookup.
548 unsigned NewNumEntries
= getNumEntries() + 1;
549 unsigned NumBuckets
= getNumBuckets();
550 if (LLVM_UNLIKELY(NewNumEntries
* 4 >= NumBuckets
* 3)) {
551 this->grow(NumBuckets
* 2);
552 LookupBucketFor(Lookup
, TheBucket
);
553 NumBuckets
= getNumBuckets();
554 } else if (LLVM_UNLIKELY(NumBuckets
-(NewNumEntries
+getNumTombstones()) <=
556 this->grow(NumBuckets
);
557 LookupBucketFor(Lookup
, TheBucket
);
561 // Only update the state after we've grown our bucket space appropriately
562 // so that when growing buckets we have self-consistent entry count.
563 incrementNumEntries();
565 // If we are writing over a tombstone, remember this.
566 const KeyT EmptyKey
= getEmptyKey();
567 if (!KeyInfoT::isEqual(TheBucket
->getFirst(), EmptyKey
))
568 decrementNumTombstones();
573 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
574 /// FoundBucket. If the bucket contains the key and a value, this returns
575 /// true, otherwise it returns a bucket with an empty marker or tombstone and
577 template<typename LookupKeyT
>
578 bool LookupBucketFor(const LookupKeyT
&Val
,
579 const BucketT
*&FoundBucket
) const {
580 const BucketT
*BucketsPtr
= getBuckets();
581 const unsigned NumBuckets
= getNumBuckets();
583 if (NumBuckets
== 0) {
584 FoundBucket
= nullptr;
588 // FoundTombstone - Keep track of whether we find a tombstone while probing.
589 const BucketT
*FoundTombstone
= nullptr;
590 const KeyT EmptyKey
= getEmptyKey();
591 const KeyT TombstoneKey
= getTombstoneKey();
592 assert(!KeyInfoT::isEqual(Val
, EmptyKey
) &&
593 !KeyInfoT::isEqual(Val
, TombstoneKey
) &&
594 "Empty/Tombstone value shouldn't be inserted into map!");
596 unsigned BucketNo
= getHashValue(Val
) & (NumBuckets
-1);
597 unsigned ProbeAmt
= 1;
599 const BucketT
*ThisBucket
= BucketsPtr
+ BucketNo
;
600 // Found Val's bucket? If so, return it.
601 if (LLVM_LIKELY(KeyInfoT::isEqual(Val
, ThisBucket
->getFirst()))) {
602 FoundBucket
= ThisBucket
;
606 // If we found an empty bucket, the key doesn't exist in the set.
607 // Insert it and return the default value.
608 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket
->getFirst(), EmptyKey
))) {
609 // If we've already seen a tombstone while probing, fill it in instead
610 // of the empty bucket we eventually probed to.
611 FoundBucket
= FoundTombstone
? FoundTombstone
: ThisBucket
;
615 // If this is a tombstone, remember it. If Val ends up not in the map, we
616 // prefer to return it than something that would require more probing.
617 if (KeyInfoT::isEqual(ThisBucket
->getFirst(), TombstoneKey
) &&
619 FoundTombstone
= ThisBucket
; // Remember the first tombstone found.
621 // Otherwise, it's a hash collision or a tombstone, continue quadratic
623 BucketNo
+= ProbeAmt
++;
624 BucketNo
&= (NumBuckets
-1);
628 template <typename LookupKeyT
>
629 bool LookupBucketFor(const LookupKeyT
&Val
, BucketT
*&FoundBucket
) {
630 const BucketT
*ConstFoundBucket
;
631 bool Result
= const_cast<const DenseMapBase
*>(this)
632 ->LookupBucketFor(Val
, ConstFoundBucket
);
633 FoundBucket
= const_cast<BucketT
*>(ConstFoundBucket
);
638 /// Return the approximate size (in bytes) of the actual map.
639 /// This is just the raw memory used by DenseMap.
640 /// If entries are pointers to objects, the size of the referenced objects
641 /// are not included.
642 size_t getMemorySize() const {
643 return getNumBuckets() * sizeof(BucketT
);
647 /// Equality comparison for DenseMap.
649 /// Iterates over elements of LHS confirming that each (key, value) pair in LHS
650 /// is also in RHS, and that no additional pairs are in RHS.
651 /// Equivalent to N calls to RHS.find and N value comparisons. Amortized
652 /// complexity is linear, worst case is O(N^2) (if every hash collides).
653 template <typename DerivedT
, typename KeyT
, typename ValueT
, typename KeyInfoT
,
656 const DenseMapBase
<DerivedT
, KeyT
, ValueT
, KeyInfoT
, BucketT
> &LHS
,
657 const DenseMapBase
<DerivedT
, KeyT
, ValueT
, KeyInfoT
, BucketT
> &RHS
) {
658 if (LHS
.size() != RHS
.size())
661 for (auto &KV
: LHS
) {
662 auto I
= RHS
.find(KV
.first
);
663 if (I
== RHS
.end() || I
->second
!= KV
.second
)
670 /// Inequality comparison for DenseMap.
672 /// Equivalent to !(LHS == RHS). See operator== for performance notes.
673 template <typename DerivedT
, typename KeyT
, typename ValueT
, typename KeyInfoT
,
676 const DenseMapBase
<DerivedT
, KeyT
, ValueT
, KeyInfoT
, BucketT
> &LHS
,
677 const DenseMapBase
<DerivedT
, KeyT
, ValueT
, KeyInfoT
, BucketT
> &RHS
) {
678 return !(LHS
== RHS
);
681 template <typename KeyT
, typename ValueT
,
682 typename KeyInfoT
= DenseMapInfo
<KeyT
>,
683 typename BucketT
= llvm::detail::DenseMapPair
<KeyT
, ValueT
>>
684 class DenseMap
: public DenseMapBase
<DenseMap
<KeyT
, ValueT
, KeyInfoT
, BucketT
>,
685 KeyT
, ValueT
, KeyInfoT
, BucketT
> {
686 friend class DenseMapBase
<DenseMap
, KeyT
, ValueT
, KeyInfoT
, BucketT
>;
688 // Lift some types from the dependent base class into this class for
689 // simplicity of referring to them.
690 using BaseT
= DenseMapBase
<DenseMap
, KeyT
, ValueT
, KeyInfoT
, BucketT
>;
694 unsigned NumTombstones
;
698 /// Create a DenseMap wth an optional \p InitialReserve that guarantee that
699 /// this number of elements can be inserted in the map without grow()
700 explicit DenseMap(unsigned InitialReserve
= 0) { init(InitialReserve
); }
702 DenseMap(const DenseMap
&other
) : BaseT() {
707 DenseMap(DenseMap
&&other
) : BaseT() {
712 template<typename InputIt
>
713 DenseMap(const InputIt
&I
, const InputIt
&E
) {
714 init(std::distance(I
, E
));
718 DenseMap(std::initializer_list
<typename
BaseT::value_type
> Vals
) {
720 this->insert(Vals
.begin(), Vals
.end());
725 deallocate_buffer(Buckets
, sizeof(BucketT
) * NumBuckets
, alignof(BucketT
));
728 void swap(DenseMap
& RHS
) {
729 this->incrementEpoch();
730 RHS
.incrementEpoch();
731 std::swap(Buckets
, RHS
.Buckets
);
732 std::swap(NumEntries
, RHS
.NumEntries
);
733 std::swap(NumTombstones
, RHS
.NumTombstones
);
734 std::swap(NumBuckets
, RHS
.NumBuckets
);
737 DenseMap
& operator=(const DenseMap
& other
) {
743 DenseMap
& operator=(DenseMap
&&other
) {
745 deallocate_buffer(Buckets
, sizeof(BucketT
) * NumBuckets
, alignof(BucketT
));
751 void copyFrom(const DenseMap
& other
) {
753 deallocate_buffer(Buckets
, sizeof(BucketT
) * NumBuckets
, alignof(BucketT
));
754 if (allocateBuckets(other
.NumBuckets
)) {
755 this->BaseT::copyFrom(other
);
762 void init(unsigned InitNumEntries
) {
763 auto InitBuckets
= BaseT::getMinBucketToReserveForEntries(InitNumEntries
);
764 if (allocateBuckets(InitBuckets
)) {
765 this->BaseT::initEmpty();
772 void grow(unsigned AtLeast
) {
773 unsigned OldNumBuckets
= NumBuckets
;
774 BucketT
*OldBuckets
= Buckets
;
776 allocateBuckets(std::max
<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast
-1))));
779 this->BaseT::initEmpty();
783 this->moveFromOldBuckets(OldBuckets
, OldBuckets
+OldNumBuckets
);
785 // Free the old table.
786 deallocate_buffer(OldBuckets
, sizeof(BucketT
) * OldNumBuckets
,
790 void shrink_and_clear() {
791 unsigned OldNumBuckets
= NumBuckets
;
792 unsigned OldNumEntries
= NumEntries
;
795 // Reduce the number of buckets.
796 unsigned NewNumBuckets
= 0;
798 NewNumBuckets
= std::max(64, 1 << (Log2_32_Ceil(OldNumEntries
) + 1));
799 if (NewNumBuckets
== NumBuckets
) {
800 this->BaseT::initEmpty();
804 deallocate_buffer(Buckets
, sizeof(BucketT
) * OldNumBuckets
,
810 unsigned getNumEntries() const {
814 void setNumEntries(unsigned Num
) {
818 unsigned getNumTombstones() const {
819 return NumTombstones
;
822 void setNumTombstones(unsigned Num
) {
826 BucketT
*getBuckets() const {
830 unsigned getNumBuckets() const {
834 bool allocateBuckets(unsigned Num
) {
836 if (NumBuckets
== 0) {
841 Buckets
= static_cast<BucketT
*>(
842 allocate_buffer(sizeof(BucketT
) * NumBuckets
, alignof(BucketT
)));
847 template <typename KeyT
, typename ValueT
, unsigned InlineBuckets
= 4,
848 typename KeyInfoT
= DenseMapInfo
<KeyT
>,
849 typename BucketT
= llvm::detail::DenseMapPair
<KeyT
, ValueT
>>
851 : public DenseMapBase
<
852 SmallDenseMap
<KeyT
, ValueT
, InlineBuckets
, KeyInfoT
, BucketT
>, KeyT
,
853 ValueT
, KeyInfoT
, BucketT
> {
854 friend class DenseMapBase
<SmallDenseMap
, KeyT
, ValueT
, KeyInfoT
, BucketT
>;
856 // Lift some types from the dependent base class into this class for
857 // simplicity of referring to them.
858 using BaseT
= DenseMapBase
<SmallDenseMap
, KeyT
, ValueT
, KeyInfoT
, BucketT
>;
860 static_assert(isPowerOf2_64(InlineBuckets
),
861 "InlineBuckets must be a power of 2.");
864 unsigned NumEntries
: 31;
865 unsigned NumTombstones
;
872 /// A "union" of an inline bucket array and the struct representing
873 /// a large bucket. This union will be discriminated by the 'Small' bit.
874 AlignedCharArrayUnion
<BucketT
[InlineBuckets
], LargeRep
> storage
;
877 explicit SmallDenseMap(unsigned NumInitBuckets
= 0) {
878 init(NumInitBuckets
);
881 SmallDenseMap(const SmallDenseMap
&other
) : BaseT() {
886 SmallDenseMap(SmallDenseMap
&&other
) : BaseT() {
891 template<typename InputIt
>
892 SmallDenseMap(const InputIt
&I
, const InputIt
&E
) {
893 init(NextPowerOf2(std::distance(I
, E
)));
902 void swap(SmallDenseMap
& RHS
) {
903 unsigned TmpNumEntries
= RHS
.NumEntries
;
904 RHS
.NumEntries
= NumEntries
;
905 NumEntries
= TmpNumEntries
;
906 std::swap(NumTombstones
, RHS
.NumTombstones
);
908 const KeyT EmptyKey
= this->getEmptyKey();
909 const KeyT TombstoneKey
= this->getTombstoneKey();
910 if (Small
&& RHS
.Small
) {
911 // If we're swapping inline bucket arrays, we have to cope with some of
912 // the tricky bits of DenseMap's storage system: the buckets are not
913 // fully initialized. Thus we swap every key, but we may have
914 // a one-directional move of the value.
915 for (unsigned i
= 0, e
= InlineBuckets
; i
!= e
; ++i
) {
916 BucketT
*LHSB
= &getInlineBuckets()[i
],
917 *RHSB
= &RHS
.getInlineBuckets()[i
];
918 bool hasLHSValue
= (!KeyInfoT::isEqual(LHSB
->getFirst(), EmptyKey
) &&
919 !KeyInfoT::isEqual(LHSB
->getFirst(), TombstoneKey
));
920 bool hasRHSValue
= (!KeyInfoT::isEqual(RHSB
->getFirst(), EmptyKey
) &&
921 !KeyInfoT::isEqual(RHSB
->getFirst(), TombstoneKey
));
922 if (hasLHSValue
&& hasRHSValue
) {
923 // Swap together if we can...
924 std::swap(*LHSB
, *RHSB
);
927 // Swap separately and handle any assymetry.
928 std::swap(LHSB
->getFirst(), RHSB
->getFirst());
930 ::new (&RHSB
->getSecond()) ValueT(std::move(LHSB
->getSecond()));
931 LHSB
->getSecond().~ValueT();
932 } else if (hasRHSValue
) {
933 ::new (&LHSB
->getSecond()) ValueT(std::move(RHSB
->getSecond()));
934 RHSB
->getSecond().~ValueT();
939 if (!Small
&& !RHS
.Small
) {
940 std::swap(getLargeRep()->Buckets
, RHS
.getLargeRep()->Buckets
);
941 std::swap(getLargeRep()->NumBuckets
, RHS
.getLargeRep()->NumBuckets
);
945 SmallDenseMap
&SmallSide
= Small
? *this : RHS
;
946 SmallDenseMap
&LargeSide
= Small
? RHS
: *this;
948 // First stash the large side's rep and move the small side across.
949 LargeRep TmpRep
= std::move(*LargeSide
.getLargeRep());
950 LargeSide
.getLargeRep()->~LargeRep();
951 LargeSide
.Small
= true;
952 // This is similar to the standard move-from-old-buckets, but the bucket
953 // count hasn't actually rotated in this case. So we have to carefully
954 // move construct the keys and values into their new locations, but there
955 // is no need to re-hash things.
956 for (unsigned i
= 0, e
= InlineBuckets
; i
!= e
; ++i
) {
957 BucketT
*NewB
= &LargeSide
.getInlineBuckets()[i
],
958 *OldB
= &SmallSide
.getInlineBuckets()[i
];
959 ::new (&NewB
->getFirst()) KeyT(std::move(OldB
->getFirst()));
960 OldB
->getFirst().~KeyT();
961 if (!KeyInfoT::isEqual(NewB
->getFirst(), EmptyKey
) &&
962 !KeyInfoT::isEqual(NewB
->getFirst(), TombstoneKey
)) {
963 ::new (&NewB
->getSecond()) ValueT(std::move(OldB
->getSecond()));
964 OldB
->getSecond().~ValueT();
968 // The hard part of moving the small buckets across is done, just move
969 // the TmpRep into its new home.
970 SmallSide
.Small
= false;
971 new (SmallSide
.getLargeRep()) LargeRep(std::move(TmpRep
));
974 SmallDenseMap
& operator=(const SmallDenseMap
& other
) {
980 SmallDenseMap
& operator=(SmallDenseMap
&&other
) {
988 void copyFrom(const SmallDenseMap
& other
) {
992 if (other
.getNumBuckets() > InlineBuckets
) {
994 new (getLargeRep()) LargeRep(allocateBuckets(other
.getNumBuckets()));
996 this->BaseT::copyFrom(other
);
999 void init(unsigned InitBuckets
) {
1001 if (InitBuckets
> InlineBuckets
) {
1003 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets
));
1005 this->BaseT::initEmpty();
1008 void grow(unsigned AtLeast
) {
1009 if (AtLeast
>= InlineBuckets
)
1010 AtLeast
= std::max
<unsigned>(64, NextPowerOf2(AtLeast
-1));
1013 if (AtLeast
< InlineBuckets
)
1014 return; // Nothing to do.
1016 // First move the inline buckets into a temporary storage.
1017 AlignedCharArrayUnion
<BucketT
[InlineBuckets
]> TmpStorage
;
1018 BucketT
*TmpBegin
= reinterpret_cast<BucketT
*>(TmpStorage
.buffer
);
1019 BucketT
*TmpEnd
= TmpBegin
;
1021 // Loop over the buckets, moving non-empty, non-tombstones into the
1022 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1023 const KeyT EmptyKey
= this->getEmptyKey();
1024 const KeyT TombstoneKey
= this->getTombstoneKey();
1025 for (BucketT
*P
= getBuckets(), *E
= P
+ InlineBuckets
; P
!= E
; ++P
) {
1026 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
) &&
1027 !KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
)) {
1028 assert(size_t(TmpEnd
- TmpBegin
) < InlineBuckets
&&
1029 "Too many inline buckets!");
1030 ::new (&TmpEnd
->getFirst()) KeyT(std::move(P
->getFirst()));
1031 ::new (&TmpEnd
->getSecond()) ValueT(std::move(P
->getSecond()));
1033 P
->getSecond().~ValueT();
1035 P
->getFirst().~KeyT();
1038 // Now make this map use the large rep, and move all the entries back
1041 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast
));
1042 this->moveFromOldBuckets(TmpBegin
, TmpEnd
);
1046 LargeRep OldRep
= std::move(*getLargeRep());
1047 getLargeRep()->~LargeRep();
1048 if (AtLeast
<= InlineBuckets
) {
1051 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast
));
1054 this->moveFromOldBuckets(OldRep
.Buckets
, OldRep
.Buckets
+OldRep
.NumBuckets
);
1056 // Free the old table.
1057 deallocate_buffer(OldRep
.Buckets
, sizeof(BucketT
) * OldRep
.NumBuckets
,
1061 void shrink_and_clear() {
1062 unsigned OldSize
= this->size();
1065 // Reduce the number of buckets.
1066 unsigned NewNumBuckets
= 0;
1068 NewNumBuckets
= 1 << (Log2_32_Ceil(OldSize
) + 1);
1069 if (NewNumBuckets
> InlineBuckets
&& NewNumBuckets
< 64u)
1072 if ((Small
&& NewNumBuckets
<= InlineBuckets
) ||
1073 (!Small
&& NewNumBuckets
== getLargeRep()->NumBuckets
)) {
1074 this->BaseT::initEmpty();
1078 deallocateBuckets();
1079 init(NewNumBuckets
);
1083 unsigned getNumEntries() const {
1087 void setNumEntries(unsigned Num
) {
1088 // NumEntries is hardcoded to be 31 bits wide.
1089 assert(Num
< (1U << 31) && "Cannot support more than 1<<31 entries");
1093 unsigned getNumTombstones() const {
1094 return NumTombstones
;
1097 void setNumTombstones(unsigned Num
) {
1098 NumTombstones
= Num
;
1101 const BucketT
*getInlineBuckets() const {
1103 // Note that this cast does not violate aliasing rules as we assert that
1104 // the memory's dynamic type is the small, inline bucket buffer, and the
1105 // 'storage.buffer' static type is 'char *'.
1106 return reinterpret_cast<const BucketT
*>(storage
.buffer
);
1109 BucketT
*getInlineBuckets() {
1110 return const_cast<BucketT
*>(
1111 const_cast<const SmallDenseMap
*>(this)->getInlineBuckets());
1114 const LargeRep
*getLargeRep() const {
1116 // Note, same rule about aliasing as with getInlineBuckets.
1117 return reinterpret_cast<const LargeRep
*>(storage
.buffer
);
1120 LargeRep
*getLargeRep() {
1121 return const_cast<LargeRep
*>(
1122 const_cast<const SmallDenseMap
*>(this)->getLargeRep());
1125 const BucketT
*getBuckets() const {
1126 return Small
? getInlineBuckets() : getLargeRep()->Buckets
;
1129 BucketT
*getBuckets() {
1130 return const_cast<BucketT
*>(
1131 const_cast<const SmallDenseMap
*>(this)->getBuckets());
1134 unsigned getNumBuckets() const {
1135 return Small
? InlineBuckets
: getLargeRep()->NumBuckets
;
1138 void deallocateBuckets() {
1142 deallocate_buffer(getLargeRep()->Buckets
,
1143 sizeof(BucketT
) * getLargeRep()->NumBuckets
,
1145 getLargeRep()->~LargeRep();
1148 LargeRep
allocateBuckets(unsigned Num
) {
1149 assert(Num
> InlineBuckets
&& "Must allocate more buckets than are inline");
1150 LargeRep Rep
= {static_cast<BucketT
*>(allocate_buffer(
1151 sizeof(BucketT
) * Num
, alignof(BucketT
))),
1157 template <typename KeyT
, typename ValueT
, typename KeyInfoT
, typename Bucket
,
1159 class DenseMapIterator
: DebugEpochBase::HandleBase
{
1160 friend class DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, Bucket
, true>;
1161 friend class DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, Bucket
, false>;
1163 using ConstIterator
= DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, Bucket
, true>;
1166 using difference_type
= ptrdiff_t;
1168 typename
std::conditional
<IsConst
, const Bucket
, Bucket
>::type
;
1169 using pointer
= value_type
*;
1170 using reference
= value_type
&;
1171 using iterator_category
= std::forward_iterator_tag
;
1174 pointer Ptr
= nullptr;
1175 pointer End
= nullptr;
1178 DenseMapIterator() = default;
1180 DenseMapIterator(pointer Pos
, pointer E
, const DebugEpochBase
&Epoch
,
1181 bool NoAdvance
= false)
1182 : DebugEpochBase::HandleBase(&Epoch
), Ptr(Pos
), End(E
) {
1183 assert(isHandleInSync() && "invalid construction!");
1185 if (NoAdvance
) return;
1186 if (shouldReverseIterate
<KeyT
>()) {
1187 RetreatPastEmptyBuckets();
1190 AdvancePastEmptyBuckets();
1193 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1194 // for const iterator destinations so it doesn't end up as a user defined copy
1196 template <bool IsConstSrc
,
1197 typename
= typename
std::enable_if
<!IsConstSrc
&& IsConst
>::type
>
1199 const DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, Bucket
, IsConstSrc
> &I
)
1200 : DebugEpochBase::HandleBase(I
), Ptr(I
.Ptr
), End(I
.End
) {}
1202 reference
operator*() const {
1203 assert(isHandleInSync() && "invalid iterator access!");
1204 if (shouldReverseIterate
<KeyT
>())
1208 pointer
operator->() const {
1209 assert(isHandleInSync() && "invalid iterator access!");
1210 if (shouldReverseIterate
<KeyT
>())
1215 bool operator==(const ConstIterator
&RHS
) const {
1216 assert((!Ptr
|| isHandleInSync()) && "handle not in sync!");
1217 assert((!RHS
.Ptr
|| RHS
.isHandleInSync()) && "handle not in sync!");
1218 assert(getEpochAddress() == RHS
.getEpochAddress() &&
1219 "comparing incomparable iterators!");
1220 return Ptr
== RHS
.Ptr
;
1222 bool operator!=(const ConstIterator
&RHS
) const {
1223 assert((!Ptr
|| isHandleInSync()) && "handle not in sync!");
1224 assert((!RHS
.Ptr
|| RHS
.isHandleInSync()) && "handle not in sync!");
1225 assert(getEpochAddress() == RHS
.getEpochAddress() &&
1226 "comparing incomparable iterators!");
1227 return Ptr
!= RHS
.Ptr
;
1230 inline DenseMapIterator
& operator++() { // Preincrement
1231 assert(isHandleInSync() && "invalid iterator access!");
1232 if (shouldReverseIterate
<KeyT
>()) {
1234 RetreatPastEmptyBuckets();
1238 AdvancePastEmptyBuckets();
1241 DenseMapIterator
operator++(int) { // Postincrement
1242 assert(isHandleInSync() && "invalid iterator access!");
1243 DenseMapIterator tmp
= *this; ++*this; return tmp
;
1247 void AdvancePastEmptyBuckets() {
1249 const KeyT Empty
= KeyInfoT::getEmptyKey();
1250 const KeyT Tombstone
= KeyInfoT::getTombstoneKey();
1252 while (Ptr
!= End
&& (KeyInfoT::isEqual(Ptr
->getFirst(), Empty
) ||
1253 KeyInfoT::isEqual(Ptr
->getFirst(), Tombstone
)))
1257 void RetreatPastEmptyBuckets() {
1259 const KeyT Empty
= KeyInfoT::getEmptyKey();
1260 const KeyT Tombstone
= KeyInfoT::getTombstoneKey();
1262 while (Ptr
!= End
&& (KeyInfoT::isEqual(Ptr
[-1].getFirst(), Empty
) ||
1263 KeyInfoT::isEqual(Ptr
[-1].getFirst(), Tombstone
)))
1268 template <typename KeyT
, typename ValueT
, typename KeyInfoT
>
1269 inline size_t capacity_in_bytes(const DenseMap
<KeyT
, ValueT
, KeyInfoT
> &X
) {
1270 return X
.getMemorySize();
1273 } // end namespace llvm
1275 #endif // LLVM_ADT_DENSEMAP_H