//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a fork of the llvm/ADT/DenseMap.h class with the following changes:
//  * Use mmap to allocate.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_DENSE_MAP_H
#define SANITIZER_DENSE_MAP_H

#include "sanitizer_common.h"
#include "sanitizer_dense_map_info.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"
namespace __sanitizer {
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase {
 public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;
  WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }
  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }
  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0)
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (__sanitizer::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      CHECK_EQ(NumEntries, 0);
    }
    setNumEntries(0);
    setNumTombstones(0);
  }
  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Key) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Key, TheBucket) ? 1 : 0;
  }
  value_type *find(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket;
    return nullptr;
  }
  const value_type *find(const KeyT &Key) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket;
    return nullptr;
  }
  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <class LookupKeyT>
  value_type *find_as(const LookupKeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket;
    return nullptr;
  }
  template <class LookupKeyT>
  const value_type *find_as(const LookupKeyT &Key) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket;
    return nullptr;
  }
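
  // Illustrative sketch (hypothetical, not part of this header): to call
  // find_as with some cheaper LookupKeyT, the KeyInfoT in use would provide,
  // in addition to its usual members,
  //   static unsigned getHashValue(const LookupKeyT &Val);
  //   static bool isEqual(const LookupKeyT &LHS, const KeyT &RHS);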
  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Key) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }
  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
    return try_emplace(KV.first, KV.second);
  }
  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
    return try_emplace(__sanitizer::move(KV.first),
                       __sanitizer::move(KV.second));
  }
  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
                                                       Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
                                 __sanitizer::forward<Ts>(Args)...);
    return {TheBucket, true};
  }
  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
                                                       Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
    return {TheBucket, true};
  }
  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
                                                     const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
                                   __sanitizer::move(KV.second), Val);
    return {TheBucket, true};
  }
  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false;  // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(value_type *I) {
    CHECK_NE(I, nullptr);
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }
  value_type &FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }
  value_type &FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(__sanitizer::move(Key)).second;
  }
  /// Iterate over active entries of the container.
  ///
  /// The callback can return false to stop the iteration early.
  template <typename Fn>
  void forEach(Fn fn) {
    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      const KeyT K = P->getFirst();
      if (!KeyInfoT::isEqual(K, EmptyKey) &&
          !KeyInfoT::isEqual(K, TombstoneKey)) {
        if (!fn(*P))
          return;
      }
    }
  }
  template <typename Fn>
  void forEach(Fn fn) const {
    const_cast<DenseMapBase *>(this)->forEach(
        [&](const value_type &KV) { return fn(KV); });
  }
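
  // Illustrative usage sketch (hypothetical caller, not part of this header):
  //   map.forEach([](value_type &KV) {
  //     if (KV.second == 0)
  //       return false;  // stop iterating early
  //     return true;     // keep going
  //   });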

 protected:
  DenseMapBase() = default;
  void destroyAll() {
    if (getNumBuckets() == 0)  // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }
  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without needing to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example, if NumEntries is 48 we need at least 65 buckets here.
    return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
  }
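
  // Illustrative arithmetic: reserve(48) therefore asks for 48 * 4 / 3 + 1 + 1
  // = 66 buckets, which RoundUpToPowerOfTwo turns into 128, keeping the table
  // under the 3/4 load factor that InsertIntoBucketImpl grows at.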
  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal;  // silence warning.
        DestBucket->getFirst() = __sanitizer::move(B->getFirst());
        ::new (&DestBucket->getSecond())
            ValueT(__sanitizer::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }
  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    CHECK_NE(&other, this);
    CHECK_EQ(getNumBuckets(), other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (__sanitizer::is_trivially_copyable<KeyT>::value &&
        __sanitizer::is_trivially_copyable<ValueT>::value)
      internal_memcpy(reinterpret_cast<void *>(getBuckets()),
                      other.getBuckets(), getNumBuckets() * sizeof(BucketT));
    else
      for (uptr i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }
  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template <typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }

  static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }
  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&...Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond())
        ValueT(__sanitizer::forward<ValueArgs>(Values)...);
    return TheBucket;
  }
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = __sanitizer::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
    return TheBucket;
  }
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky. For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket. If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
                        NumBuckets / 8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }
  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket. If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
    CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));

    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets - 1);
    }
  }
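
  // Note: BucketNo advances by 1, 2, 3, ... on successive collisions, and
  // NumBuckets is always a power of two, so `& (NumBuckets - 1)` is a cheap
  // modulo and the probe sequence eventually visits every bucket; the grow
  // policy above keeps empty buckets available, so the loop terminates.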
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
        Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }
 public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// is not included.
  uptr getMemorySize() const {
    return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());
  }
};
/// Equality comparison for DenseMap.
///
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
/// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator==(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  bool Eq = true;
  LHS.forEach(
      [&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,
                                      BucketT>::value_type &KV) -> bool {
        const auto *I = RHS.find(KV.first);
        if (!I || I->second != KV.second) {
          Eq = false;
          return false;
        }
        return true;
      });

  return Eq;
}
/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}
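
// Illustrative usage sketch (hypothetical caller; assumes DenseMapInfo is
// available for the key type and that Addr and Use() exist):
//   DenseMap<uptr, u32> Counts;
//   Counts[Addr]++;                                   // insert or update
//   if (const auto *P = Counts.find(Addr)) Use(P->second);
//   Counts.erase(Addr);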
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets = nullptr;
  unsigned NumEntries = 0;
  unsigned NumTombstones = 0;
  unsigned NumBuckets = 0;

 public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
  constexpr DenseMap() = default;
  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  ~DenseMap() {
    destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
  }
  void swap(DenseMap &RHS) {
    Swap(Buckets, RHS.Buckets);
    Swap(NumEntries, RHS.NumEntries);
    Swap(NumTombstones, RHS.NumTombstones);
    Swap(NumBuckets, RHS.NumBuckets);
  }
  DenseMap &operator=(const DenseMap &other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap &operator=(DenseMap &&other) {
    destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    init(0);
    swap(other);
    return *this;
  }
  void copyFrom(const DenseMap &other) {
    destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }
  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }
  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
    CHECK(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
  }
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) { NumEntries = Num; }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }

  BucketT *getBuckets() const { return Buckets; }

  unsigned getNumBuckets() const { return NumBuckets; }
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    uptr Size = sizeof(BucketT) * NumBuckets;
    if (Size * 2 <= GetPageSizeCached()) {
      // We always allocate at least a page, so use entire space.
      unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
      Size <<= Log2;
      NumBuckets <<= Log2;
      CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
      CHECK_GT(Size * 2, GetPageSizeCached());
    }
    Buckets = static_cast<BucketT *>(allocate_buffer(Size));
    return true;
  }
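
  // Illustrative arithmetic (assuming a 4096-byte page and a 16-byte BucketT):
  // allocateBuckets(64) computes Size = 1024, Log2 = 2, and widens the table
  // to 256 buckets so the whole mmap'ed page is used.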
  static void *allocate_buffer(uptr Size) {
    return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
  }

  static void deallocate_buffer(void *Ptr, uptr Size) {
    UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_DENSE_MAP_H