/*
 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserved.
 * Copyright (C) 2008 David Levin <levin@chromium.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
#ifndef WTF_HashTable_h
#define WTF_HashTable_h

#include "wtf/Alignment.h"
#include "wtf/Assertions.h"
#include "wtf/ConditionalDestructor.h"
#include "wtf/DefaultAllocator.h"
#include "wtf/HashTraits.h"

#define DUMP_HASHTABLE_STATS 0
#define DUMP_HASHTABLE_STATS_PER_TABLE 0

#if DUMP_HASHTABLE_STATS_PER_TABLE
#include "wtf/DataLog.h"
#endif
#if DUMP_HASHTABLE_STATS
#if DUMP_HASHTABLE_STATS_PER_TABLE
#define UPDATE_PROBE_COUNTS() \
    ++probeCount; \
    HashTableStats::recordCollisionAtCount(probeCount); \
    ++perTableProbeCount; \
    m_stats->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS() \
    atomicIncrement(&HashTableStats::numAccesses); \
    int probeCount = 0; \
    ++m_stats->numAccesses; \
    int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() \
    ++probeCount; \
    HashTableStats::recordCollisionAtCount(probeCount)
#define UPDATE_ACCESS_COUNTS() \
    atomicIncrement(&HashTableStats::numAccesses); \
    int probeCount = 0
#endif
#else
#if DUMP_HASHTABLE_STATS_PER_TABLE
#define UPDATE_PROBE_COUNTS() \
    ++perTableProbeCount; \
    m_stats->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS() \
    ++m_stats->numAccesses; \
    int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() do { } while (0)
#define UPDATE_ACCESS_COUNTS() do { } while (0)
#endif
#endif
namespace WTF {

#if DUMP_HASHTABLE_STATS

struct HashTableStats {
    // The following variables are all atomically incremented when modified.
    static int numAccesses;
    static int numRehashes;
    static int numRemoves;
    static int numReinserts;

    // The following variables are only modified in the recordCollisionAtCount method within a mutex.
    static int maxCollisions;
    static int numCollisions;
    static int collisionGraph[4096];

    static void recordCollisionAtCount(int count);
    static void dumpStats();
};

#endif
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
class HashTable;
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
class HashTableIterator;
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
class HashTableConstIterator;
template<typename Value, typename HashFunctions, typename HashTraits, typename Allocator>
class LinkedHashSet;
template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z>
struct WeakProcessingHashTableHelper;

typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
class HashTableConstIterator {
private:
    typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
    typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
    typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
    typedef Value ValueType;
    typedef typename Traits::IteratorConstGetType GetType;
    typedef const ValueType* PointerType;

    friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;
    friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;

    void skipEmptyBuckets()
    {
        while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket(*m_position))
            ++m_position;
    }

    HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container)
        : m_position(position)
        , m_endPosition(endPosition)
#if ENABLE(ASSERT)
        , m_container(container)
        , m_containerModifications(container->modifications())
#endif
    {
        skipEmptyBuckets();
    }

    HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container, HashItemKnownGoodTag)
        : m_position(position)
        , m_endPosition(endPosition)
#if ENABLE(ASSERT)
        , m_container(container)
        , m_containerModifications(container->modifications())
#endif
    {
        ASSERT(m_containerModifications == m_container->modifications());
    }

    void checkModifications() const
    {
        // HashTable and collections that build on it do not support
        // modifications while there is an iterator in use. The exception
        // is ListHashSet, which has its own iterators that tolerate
        // modification of the underlying set.
        ASSERT(m_containerModifications == m_container->modifications());
    }

public:
    HashTableConstIterator() { }

    GetType get() const
    {
        checkModifications();
        return m_position;
    }
    typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
    GetType operator->() const { return get(); }

    const_iterator& operator++()
    {
        ASSERT(m_position != m_endPosition);
        checkModifications();
        ++m_position;
        skipEmptyBuckets();
        return *this;
    }

    // postfix ++ intentionally omitted

    // Comparison.
    bool operator==(const const_iterator& other) const
    {
        return m_position == other.m_position;
    }
    bool operator!=(const const_iterator& other) const
    {
        return m_position != other.m_position;
    }
    bool operator==(const iterator& other) const
    {
        return *this == static_cast<const_iterator>(other);
    }
    bool operator!=(const iterator& other) const
    {
        return *this != static_cast<const_iterator>(other);
    }

private:
    PointerType m_position;
    PointerType m_endPosition;
#if ENABLE(ASSERT)
    const HashTableType* m_container;
    int64_t m_containerModifications;
#endif
};
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
class HashTableIterator {
private:
    typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
    typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
    typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
    typedef Value ValueType;
    typedef typename Traits::IteratorGetType GetType;
    typedef ValueType* PointerType;

    friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>;

    HashTableIterator(PointerType pos, PointerType end, const HashTableType* container) : m_iterator(pos, end, container) { }
    HashTableIterator(PointerType pos, PointerType end, const HashTableType* container, HashItemKnownGoodTag tag) : m_iterator(pos, end, container, tag) { }

public:
    HashTableIterator() { }

    // default copy, assignment and destructor are OK

    GetType get() const { return const_cast<GetType>(m_iterator.get()); }
    typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
    GetType operator->() const { return get(); }

    iterator& operator++() { ++m_iterator; return *this; }

    // postfix ++ intentionally omitted

    // Comparison.
    bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; }
    bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; }
    bool operator==(const const_iterator& other) const { return m_iterator == other; }
    bool operator!=(const const_iterator& other) const { return m_iterator != other; }

    operator const_iterator() const { return m_iterator; }

private:
    const_iterator m_iterator;
};
// Work around MSVC's standard library, whose swap for pairs does not swap by component.
template<typename T> inline void hashTableSwap(T& a, T& b)
{
    swap(a, b);
}

template<typename T, typename U> inline void hashTableSwap(KeyValuePair<T, U>& a, KeyValuePair<T, U>& b)
{
    swap(a.key, b.key);
    swap(a.value, b.value);
}
template<typename T, typename Allocator, bool useSwap = !IsTriviallyDestructible<T>::value>
struct Mover;
template<typename T, typename Allocator> struct Mover<T, Allocator, true> {
    static void move(T& from, T& to)
    {
        // The key and value cannot be swapped atomically, and it would be
        // wrong to have a GC when only one was swapped and the other still
        // contained garbage (eg. from a previous use of the same slot).
        // Therefore we forbid a GC until both the key and the value are
        // swapped.
        Allocator::enterGCForbiddenScope();
        hashTableSwap(from, to);
        Allocator::leaveGCForbiddenScope();
    }
};
template<typename T, typename Allocator> struct Mover<T, Allocator, false> {
    static void move(T& from, T& to) { to = from; }
};
template<typename HashFunctions> class IdentityHashTranslator {
public:
    template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); }
    template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
    template<typename T, typename U, typename V> static void translate(T& location, const U&, const V& value) { location = value; }
};
template<typename HashTableType, typename ValueType> struct HashTableAddResult {
    HashTableAddResult(const HashTableType* container, ValueType* storedValue, bool isNewEntry)
        : storedValue(storedValue)
        , isNewEntry(isNewEntry)
#if ENABLE(SECURITY_ASSERT)
        , m_container(container)
        , m_containerModifications(container->modifications())
#endif
    {
        ASSERT_UNUSED(container, container);
    }

    ValueType* storedValue;
    bool isNewEntry;

#if ENABLE(SECURITY_ASSERT)
    ~HashTableAddResult()
    {
        // If a rehash happened before accessing storedValue, it's a
        // use-after-free. Any modification may cause a rehash, so we check
        // for modifications here.
        // A rehash after accessing storedValue is harmless, but will assert if
        // the AddResult destructor takes place after a modification. You
        // may need to limit the scope of the AddResult.
        ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications == m_container->modifications());
    }

private:
    const HashTableType* m_container;
    const int64_t m_containerModifications;
#endif
};
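// Illustrative usage sketch (not part of the original header; 'table',
// 'value' and 'process' are hypothetical): because any later modification of
// the table may rehash and move buckets, keep the AddResult's scope tight and
// consume storedValue before the next mutation.
//
//     {
//         HashTableType::AddResult result = table.add(value);
//         if (result.isNewEntry)
//             process(*result.storedValue); // safe: no mutation since add()
//     } // With SECURITY_ASSERT enabled, the destructor verifies here that
//       // no modification happened while the AddResult was live.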
template<typename Value, typename Extractor, typename KeyTraits>
struct HashTableHelper {
    static bool isEmptyBucket(const Value& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
    static bool isDeletedBucket(const Value& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
    static bool isEmptyOrDeletedBucket(const Value& value) { return isEmptyBucket(value) || isDeletedBucket(value); }
};
template<typename HashTranslator, typename KeyTraits, bool safeToCompareToEmptyOrDeleted>
struct HashTableKeyChecker {
    // There's no simple generic way to make this check if safeToCompareToEmptyOrDeleted is false,
    // so the check always passes.
    template<typename T>
    static bool checkKey(const T&) { return true; }
};

template<typename HashTranslator, typename KeyTraits>
struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
    template<typename T>
    static bool checkKey(const T& key)
    {
        // FIXME: Also check equality to the deleted value.
        return !HashTranslator::equal(KeyTraits::emptyValue(), key);
    }
};
// Note: empty or deleted key values are not allowed; using them may lead to undefined behavior.
// For pointer keys this means that null pointers are not allowed unless you supply custom key traits.
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
class HashTable : public ConditionalDestructor<HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>, Allocator::isGarbageCollected> {
public:
    typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> iterator;
    typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator;
    typedef Traits ValueTraits;
    typedef Key KeyType;
    typedef typename KeyTraits::PeekInType KeyPeekInType;
    typedef typename KeyTraits::PassInType KeyPassInType;
    typedef Value ValueType;
    typedef Extractor ExtractorType;
    typedef KeyTraits KeyTraitsType;
    typedef typename Traits::PassInType ValuePassInType;
    typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
    typedef HashTableAddResult<HashTable, ValueType> AddResult;
#if DUMP_HASHTABLE_STATS_PER_TABLE
    struct Stats {
        Stats()
            : numAccesses(0)
            , numRehashes(0)
            , numRemoves(0)
            , numReinserts(0)
            , maxCollisions(0)
            , numCollisions(0)
            , collisionGraph()
        {
        }

        int numAccesses;
        int numRehashes;
        int numRemoves;
        int numReinserts;

        int maxCollisions;
        int numCollisions;
        int collisionGraph[4096];

        void recordCollisionAtCount(int count)
        {
            if (count > maxCollisions)
                maxCollisions = count;
            numCollisions++;
            collisionGraph[count]++;
        }

        void dumpStats()
        {
            dataLogF("\nWTF::HashTable::Stats dump\n\n");
            dataLogF("%d accesses\n", numAccesses);
            dataLogF("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
            dataLogF("longest collision chain: %d\n", maxCollisions);
            for (int i = 1; i <= maxCollisions; i++)
                dataLogF("  %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i + 1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
            dataLogF("%d rehashes\n", numRehashes);
            dataLogF("%d reinserts\n", numReinserts);
        }
    };
#endif
    HashTable();
    void finalize()
    {
        ASSERT(!Allocator::isGarbageCollected);
        if (LIKELY(!m_table))
            return;
        deleteAllBucketsAndDeallocate(m_table, m_tableSize);
        m_table = nullptr;
    }

    HashTable(const HashTable&);
    void swap(HashTable&);
    HashTable& operator=(const HashTable&);
    // When the hash table is empty, just return the same iterator for end as for begin.
    // This is more efficient because we don't have to skip all the empty and deleted
    // buckets, and iterating an empty table is a common case that's worth optimizing.
    iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
    iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
    const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_table); }
    const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); }

    unsigned size() const { return m_keyCount; }
    unsigned capacity() const { return m_tableSize; }
    bool isEmpty() const { return !m_keyCount; }
    void reserveCapacityForSize(unsigned size);

    AddResult add(ValuePassInType value)
    {
        return add<IdentityTranslatorType>(Extractor::extract(value), value);
    }

    // A special version of add() that finds the object by hashing and comparing
    // with some other type, to avoid the cost of type conversion if the object
    // is already in the table (see the illustrative translator sketch below).
    template<typename HashTranslator, typename T, typename Extra> AddResult add(const T& key, const Extra&);
    template<typename HashTranslator, typename T, typename Extra> AddResult addPassingHashCode(const T& key, const Extra&);
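    // Illustrative sketch (not part of the original header; the translator
    // below is hypothetical): a custom HashTranslator lets callers hash and
    // compare against a table of String keys using a raw const char*, so a
    // String is only constructed when the key is actually inserted. A
    // translator only needs hash(), equal() and translate() with the shapes
    // used by IdentityHashTranslator above.
    //
    //     struct CStringTranslator {
    //         static unsigned hash(const char* key) { return hashString(key); } // hashString is hypothetical
    //         static bool equal(const String& a, const char* b) { return a == b; }
    //         static void translate(String& location, const char* key, const char*)
    //         {
    //             location = String(key); // convert only on actual insertion
    //         }
    //     };
    //
    //     // usage: set.add<CStringTranslator>(cString, cString);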
    iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
    const_iterator find(KeyPeekInType key) const { return find<IdentityTranslatorType>(key); }
    bool contains(KeyPeekInType key) const { return contains<IdentityTranslatorType>(key); }

    template<typename HashTranslator, typename T> iterator find(const T&);
    template<typename HashTranslator, typename T> const_iterator find(const T&) const;
    template<typename HashTranslator, typename T> bool contains(const T&) const;

    void remove(KeyPeekInType);
    void remove(iterator);
    void remove(const_iterator);
    void clear();
    static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
    static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
    static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTableHelper<ValueType, Extractor, KeyTraits>::isEmptyOrDeletedBucket(value); }
    ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, KeyPeekInType>(key); }
    template<typename HashTranslator, typename T> ValueType* lookup(T);
    template<typename HashTranslator, typename T> const ValueType* lookup(T) const;

    template<typename VisitorDispatcher> void trace(VisitorDispatcher);
#if ENABLE(ASSERT)
    int64_t modifications() const { return m_modifications; }
    void registerModification() { m_modifications++; }
    // HashTable and collections that build on it do not support
    // modifications while there is an iterator in use. The exception is
    // ListHashSet, which has its own iterators that tolerate modification
    // of the underlying set.
    void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications); }
#else
    int64_t modifications() const { return 0; }
    void registerModification() { }
    void checkModifications(int64_t mods) const { }
#endif
private:
    static ValueType* allocateTable(unsigned size);
    static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);

    typedef std::pair<ValueType*, bool> LookupType;
    typedef std::pair<LookupType, unsigned> FullLookupType;

    LookupType lookupForWriting(const Key& key) { return lookupForWriting<IdentityTranslatorType>(key); }
    template<typename HashTranslator, typename T> FullLookupType fullLookupForWriting(const T&);
    template<typename HashTranslator, typename T> LookupType lookupForWriting(const T&);

    void remove(ValueType*);
    bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; }
    bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
    bool shouldShrink() const
    {
        // The isAllocationAllowed check should come last, because it's
        // expensive.
        return m_keyCount * m_minLoad < m_tableSize
            && m_tableSize > KeyTraits::minimumTableSize
            && Allocator::isAllocationAllowed();
    }
    ValueType* expand(ValueType* entry = 0);
    void shrink() { rehash(m_tableSize / 2, 0); }
    ValueType* expandBuffer(unsigned newTableSize, ValueType* entry, bool&);
    ValueType* rehashTo(ValueType* newTable, unsigned newTableSize, ValueType* entry);
    ValueType* rehash(unsigned newTableSize, ValueType* entry);
    ValueType* reinsert(ValueType&);

    static void initializeBucket(ValueType& bucket);
    static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected); }

    FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
    {
        return FullLookupType(LookupType(position, found), hash);
    }
    iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this); }
    const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this); }
    iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
    const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
    static const unsigned m_maxLoad = 2;
    static const unsigned m_minLoad = 6;
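    // Worked example (illustrative, not part of the original header): with
    // m_maxLoad == 2, shouldExpand() fires once (m_keyCount + m_deletedCount)
    // reaches half of m_tableSize, i.e. the maximum load factor is 1/2. With
    // m_minLoad == 6, shouldShrink() fires below a 1/6 load factor. For a
    // table of 64 buckets:
    //
    //     (keyCount + deletedCount) * 2 >= 64  -> expand at 32 occupied slots
    //     keyCount * 6 < 64                    -> shrink below 11 live keys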
    unsigned tableSizeMask() const
    {
        size_t mask = m_tableSize - 1;
        ASSERT((mask & m_tableSize) == 0);
        return mask;
    }

    void setEnqueued() { m_queueFlag = true; }
    void clearEnqueued() { m_queueFlag = false; }
    bool enqueued() { return m_queueFlag; }
    ValueType* m_table;
    unsigned m_tableSize;
    unsigned m_keyCount;
    unsigned m_deletedCount:31;
    unsigned m_queueFlag:1;
#if ENABLE(ASSERT)
    unsigned m_modifications;
#endif

#if DUMP_HASHTABLE_STATS_PER_TABLE
public:
    mutable OwnPtr<Stats> m_stats;
#endif

    template<WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelper;
    template<typename T, typename U, typename V, typename W> friend class LinkedHashSet;
};
// Set all the bits to one after the most significant bit: 00110101010 -> 00111111111.
template<unsigned size> struct OneifyLowBits;
template<>
struct OneifyLowBits<0> {
    static const unsigned value = 0;
};
template<unsigned number>
struct OneifyLowBits {
    static const unsigned value = number | OneifyLowBits<(number >> 1)>::value;
};
// Compute the first power of two integer that is an upper bound of the parameter 'number'.
template<unsigned number>
struct UpperPowerOfTwoBound {
    static const unsigned value = (OneifyLowBits<number - 1>::value + 1) * 2;
};
// Because power of two numbers are the limit of maxLoad, their capacity is twice the
// UpperPowerOfTwoBound, or 4 times their values.
template<unsigned size, bool isPowerOfTwo> struct HashTableCapacityForSizeSplitter;
template<unsigned size>
struct HashTableCapacityForSizeSplitter<size, true> {
    static const unsigned value = size * 4;
};
template<unsigned size>
struct HashTableCapacityForSizeSplitter<size, false> {
    static const unsigned value = UpperPowerOfTwoBound<size>::value;
};
// HashTableCapacityForSize computes the upper power of two capacity to hold the size parameter.
// This is done at compile time to initialize the HashTraits.
template<unsigned size>
struct HashTableCapacityForSize {
    static const unsigned value = HashTableCapacityForSizeSplitter<size, !(size & (size - 1))>::value;
    static_assert(size > 0, "HashTable minimum capacity should be > 0");
    static_assert(!static_cast<int>(value >> 31), "HashTable capacity should not overflow 32bit int");
    static_assert(value > (2 * size), "HashTable capacity should be able to hold content size");
};
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable()
    : m_table(nullptr)
    , m_tableSize(0)
    , m_keyCount(0)
    , m_deletedCount(0)
    , m_queueFlag(false)
#if ENABLE(ASSERT)
    , m_modifications(0)
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
    , m_stats(adoptPtr(new Stats))
#endif
{
}
inline unsigned doubleHash(unsigned key)
{
    key = ~key + (key >> 23);
    key ^= (key << 12);
    key ^= (key >> 7);
    key ^= (key << 2);
    key ^= (key >> 20);
    return key;
}
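// A note on the probe sequence (illustrative, not part of the original
// header): the lookup paths below step by k = 1 | doubleHash(h), which forces
// k to be odd. Since the table size is a power of two, an odd step is coprime
// to it, so the sequence i, i + k, i + 2k, ... (mod tableSize) visits every
// bucket before repeating. For example, with tableSize == 8 and k == 3:
//
//     i = 5 -> 0 -> 3 -> 6 -> 1 -> 4 -> 7 -> 2 -> 5 (all 8 buckets)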
inline unsigned calculateCapacity(unsigned size)
{
    for (unsigned mask = size; mask; mask >>= 1)
        size |= mask; // 00110101010 -> 00111111111
    return (size + 1) * 2; // 00111111111 -> 10000000000
}
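// Worked example (illustrative, not part of the original header): for
// size == 100 (0b1100100), OR-ing in the shifted masks fills every bit below
// the most significant one, giving 0b1111111 == 127; the function then
// returns (127 + 1) * 2 == 256. The result is always a power of two at least
// twice the requested size, matching the 1/2 maximum load factor above.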
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reserveCapacityForSize(unsigned newSize)
{
    unsigned newCapacity = calculateCapacity(newSize);
    if (newCapacity > capacity()) {
        RELEASE_ASSERT(!static_cast<int>(newCapacity >> 31)); // HashTable capacity should not overflow 32bit int.
        rehash(newCapacity, 0);
    }
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T>
inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key)
{
    return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTranslator, T>(key));
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T>
inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key) const
{
    ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
    const ValueType* table = m_table;
    if (!table)
        return nullptr;

    size_t k = 0;
    size_t sizeMask = tableSizeMask();
    unsigned h = HashTranslator::hash(key);
    size_t i = h & sizeMask;

    UPDATE_ACCESS_COUNTS();

    while (1) {
        const ValueType* entry = table + i;

        if (HashFunctions::safeToCompareToEmptyOrDeleted) {
            if (HashTranslator::equal(Extractor::extract(*entry), key))
                return entry;

            if (isEmptyBucket(*entry))
                return nullptr;
        } else {
            if (isEmptyBucket(*entry))
                return nullptr;

            if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key))
                return entry;
        }
        UPDATE_PROBE_COUNTS();
        if (!k)
            k = 1 | doubleHash(h);
        i = (i + k) & sizeMask;
    }
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T>
inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookupForWriting(const T& key)
{
    ASSERT(m_table);
    registerModification();

    ValueType* table = m_table;
    size_t k = 0;
    size_t sizeMask = tableSizeMask();
    unsigned h = HashTranslator::hash(key);
    size_t i = h & sizeMask;

    UPDATE_ACCESS_COUNTS();

    ValueType* deletedEntry = 0;

    while (1) {
        ValueType* entry = table + i;

        if (isEmptyBucket(*entry))
            return LookupType(deletedEntry ? deletedEntry : entry, false);

        if (HashFunctions::safeToCompareToEmptyOrDeleted) {
            if (HashTranslator::equal(Extractor::extract(*entry), key))
                return LookupType(entry, true);

            if (isDeletedBucket(*entry))
                deletedEntry = entry;
        } else {
            if (isDeletedBucket(*entry))
                deletedEntry = entry;
            else if (HashTranslator::equal(Extractor::extract(*entry), key))
                return LookupType(entry, true);
        }
        UPDATE_PROBE_COUNTS();
        if (!k)
            k = 1 | doubleHash(h);
        i = (i + k) & sizeMask;
    }
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T>
inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::fullLookupForWriting(const T& key)
{
    ASSERT(m_table);
    registerModification();

    ValueType* table = m_table;
    size_t k = 0;
    size_t sizeMask = tableSizeMask();
    unsigned h = HashTranslator::hash(key);
    size_t i = h & sizeMask;

    UPDATE_ACCESS_COUNTS();

    ValueType* deletedEntry = 0;

    while (1) {
        ValueType* entry = table + i;

        if (isEmptyBucket(*entry))
            return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);

        if (HashFunctions::safeToCompareToEmptyOrDeleted) {
            if (HashTranslator::equal(Extractor::extract(*entry), key))
                return makeLookupResult(entry, true, h);

            if (isDeletedBucket(*entry))
                deletedEntry = entry;
        } else {
            if (isDeletedBucket(*entry))
                deletedEntry = entry;
            else if (HashTranslator::equal(Extractor::extract(*entry), key))
                return makeLookupResult(entry, true, h);
        }
        UPDATE_PROBE_COUNTS();
        if (!k)
            k = 1 | doubleHash(h);
        i = (i + k) & sizeMask;
    }
}
template<bool emptyValueIsZero> struct HashTableBucketInitializer;

template<> struct HashTableBucketInitializer<false> {
    template<typename Traits, typename Value> static void initialize(Value& bucket)
    {
        new (NotNull, &bucket) Value(Traits::emptyValue());
    }
};

template<> struct HashTableBucketInitializer<true> {
    template<typename Traits, typename Value> static void initialize(Value& bucket)
    {
        // This initializes the bucket without copying the empty value.
        // That makes it possible to use this with types that don't support copying.
        // The memset to 0 looks like a slow operation but is optimized by the compilers.
        memset(&bucket, 0, sizeof(bucket));
    }
};
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::initializeBucket(ValueType& bucket)
{
    // The key and value cannot be initialized atomically, and it would be
    // wrong to have a GC when only one was initialized and the other still
    // contained garbage (eg. from a previous use of the same slot).
    // Therefore we forbid a GC until both the key and the value are
    // initialized.
    Allocator::enterGCForbiddenScope();
    HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Traits>(bucket);
    Allocator::leaveGCForbiddenScope();
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T, typename Extra>
typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::add(const T& key, const Extra& extra)
{
    ASSERT(Allocator::isAllocationAllowed());
    if (!m_table)
        expand();

    ASSERT(m_table);

    ValueType* table = m_table;
    size_t k = 0;
    size_t sizeMask = tableSizeMask();
    unsigned h = HashTranslator::hash(key);
    size_t i = h & sizeMask;

    UPDATE_ACCESS_COUNTS();

    ValueType* deletedEntry = 0;
    ValueType* entry;
    while (1) {
        entry = table + i;

        if (isEmptyBucket(*entry))
            break;

        if (HashFunctions::safeToCompareToEmptyOrDeleted) {
            if (HashTranslator::equal(Extractor::extract(*entry), key))
                return AddResult(this, entry, false);

            if (isDeletedBucket(*entry))
                deletedEntry = entry;
        } else {
            if (isDeletedBucket(*entry))
                deletedEntry = entry;
            else if (HashTranslator::equal(Extractor::extract(*entry), key))
                return AddResult(this, entry, false);
        }
        UPDATE_PROBE_COUNTS();
        if (!k)
            k = 1 | doubleHash(h);
        i = (i + k) & sizeMask;
    }

    registerModification();

    if (deletedEntry) {
        // Overwrite any data left over from the last use, using placement new
        // or memset.
        initializeBucket(*deletedEntry);
        entry = deletedEntry;
        --m_deletedCount;
    }

    HashTranslator::translate(*entry, key, extra);
    ASSERT(!isEmptyOrDeletedBucket(*entry));

    ++m_keyCount;

    if (shouldExpand())
        entry = expand(entry);

    return AddResult(this, entry, true);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T, typename Extra>
typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::addPassingHashCode(const T& key, const Extra& extra)
{
    ASSERT(Allocator::isAllocationAllowed());
    if (!m_table)
        expand();

    FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);

    ValueType* entry = lookupResult.first.first;
    bool found = lookupResult.first.second;
    unsigned h = lookupResult.second;

    if (found)
        return AddResult(this, entry, false);

    registerModification();

    if (isDeletedBucket(*entry)) {
        initializeBucket(*entry);
        --m_deletedCount;
    }

    HashTranslator::translate(*entry, key, extra, h);
    ASSERT(!isEmptyOrDeletedBucket(*entry));

    ++m_keyCount;
    if (shouldExpand())
        entry = expand(entry);

    return AddResult(this, entry, true);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reinsert(ValueType& entry)
{
    ASSERT(m_table);
    registerModification();
    ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
    ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
#if DUMP_HASHTABLE_STATS
    atomicIncrement(&HashTableStats::numReinserts);
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
    ++m_stats->numReinserts;
#endif
    Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
    Mover<ValueType, Allocator>::move(entry, *newEntry);

    return newEntry;
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T>
inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key)
{
    ValueType* entry = lookup<HashTranslator>(key);
    if (!entry)
        return end();

    return makeKnownGoodIterator(entry);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T>
inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key) const
{
    ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
    if (!entry)
        return end();

    return makeKnownGoodConstIterator(entry);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename HashTranslator, typename T>
bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::contains(const T& key) const
{
    return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(ValueType* pos)
{
    registerModification();
#if DUMP_HASHTABLE_STATS
    atomicIncrement(&HashTableStats::numRemoves);
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
    ++m_stats->numRemoves;
#endif

    deleteBucket(*pos);
    ++m_deletedCount;
    --m_keyCount;

    if (shouldShrink())
        shrink();
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(iterator it)
{
    if (it == end())
        return;

    remove(const_cast<ValueType*>(it.m_iterator.m_position));
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(const_iterator it)
{
    if (it == end())
        return;

    remove(const_cast<ValueType*>(it.m_position));
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(KeyPeekInType key)
{
    remove(find(key));
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::allocateTable(unsigned size)
{
    size_t allocSize = size * sizeof(ValueType);
    ValueType* result;
    // Assert that we will not use memset on things with a vtable entry.
    // The compiler will also check this on some platforms. We would
    // like to check this on the whole value (key-value pair), but
    // IsPolymorphic will return false for a pair of two types, even if
    // one of the components is polymorphic.
    static_assert(!Traits::emptyValueIsZero || !IsPolymorphic<KeyType>::value, "empty value cannot be zero for things with a vtable");

    static_assert(Allocator::isGarbageCollected
        || ((!IsAllowOnlyInlineAllocation<KeyType>::value || !NeedsTracing<KeyType>::value)
        && (!IsAllowOnlyInlineAllocation<ValueType>::value || !NeedsTracing<ValueType>::value)),
        "Cannot put ALLOW_ONLY_INLINE_ALLOCATION objects that have trace methods into an off-heap HashTable");

    if (Traits::emptyValueIsZero) {
        result = Allocator::template allocateZeroedHashTableBacking<ValueType, HashTable>(allocSize);
    } else {
        result = Allocator::template allocateHashTableBacking<ValueType, HashTable>(allocSize);
        for (unsigned i = 0; i < size; i++)
            initializeBucket(result[i]);
    }
    return result;
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size)
{
    if (!IsTriviallyDestructible<ValueType>::value) {
        for (unsigned i = 0; i < size; ++i) {
            // This code is called when the hash table is cleared or
            // resized. We have allocated a new backing store and we need
            // to run the destructors on the old backing store, as it is
            // being freed. If we are GCing we need to both call the
            // destructor and mark the bucket as deleted, otherwise the
            // destructor gets called again when the GC finds the backing
            // store. With the default allocator it's enough to call the
            // destructor, since we will free the memory explicitly and
            // we won't see the memory with the bucket again.
            if (Allocator::isGarbageCollected) {
                if (!isEmptyOrDeletedBucket(table[i]))
                    deleteBucket(table[i]);
            } else {
                if (!isDeletedBucket(table[i]))
                    table[i].~ValueType();
            }
        }
    }
    Allocator::freeHashTableBacking(table);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expand(Value* entry)
{
    unsigned newSize;
    if (!m_tableSize) {
        newSize = KeyTraits::minimumTableSize;
    } else if (mustRehashInPlace()) {
        newSize = m_tableSize;
    } else {
        newSize = m_tableSize * 2;
        RELEASE_ASSERT(newSize > m_tableSize);
    }

    return rehash(newSize, entry);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expandBuffer(unsigned newTableSize, Value* entry, bool& success)
{
    success = false;
    ASSERT(m_tableSize < newTableSize);
    if (!Allocator::expandHashTableBacking(m_table, newTableSize * sizeof(ValueType)))
        return nullptr;

    success = true;

    Value* newEntry = nullptr;
    unsigned oldTableSize = m_tableSize;
    ValueType* originalTable = m_table;

    ValueType* temporaryTable = allocateTable(oldTableSize);
    for (unsigned i = 0; i < oldTableSize; i++) {
        if (&m_table[i] == entry)
            newEntry = &temporaryTable[i];
        if (isEmptyOrDeletedBucket(m_table[i])) {
            ASSERT(&m_table[i] != entry);
            if (Traits::emptyValueIsZero) {
                memset(&temporaryTable[i], 0, sizeof(ValueType));
            } else {
                initializeBucket(temporaryTable[i]);
            }
        } else {
            Mover<ValueType, Allocator>::move(m_table[i], temporaryTable[i]);
        }
    }
    m_table = temporaryTable;

    if (Traits::emptyValueIsZero) {
        memset(originalTable, 0, newTableSize * sizeof(ValueType));
    } else {
        for (unsigned i = 0; i < newTableSize; i++)
            initializeBucket(originalTable[i]);
    }
    newEntry = rehashTo(originalTable, newTableSize, newEntry);
    deleteAllBucketsAndDeallocate(temporaryTable, oldTableSize);

    return newEntry;
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry)
{
    unsigned oldTableSize = m_tableSize;
    ValueType* oldTable = m_table;

#if DUMP_HASHTABLE_STATS
    if (oldTableSize != 0)
        atomicIncrement(&HashTableStats::numRehashes);
#endif

#if DUMP_HASHTABLE_STATS_PER_TABLE
    if (oldTableSize != 0)
        ++m_stats->numRehashes;
#endif

    m_table = newTable;
    m_tableSize = newTableSize;

    Value* newEntry = 0;
    for (unsigned i = 0; i != oldTableSize; ++i) {
        if (isEmptyOrDeletedBucket(oldTable[i])) {
            ASSERT(&oldTable[i] != entry);
            continue;
        }
        Value* reinsertedEntry = reinsert(oldTable[i]);
        if (&oldTable[i] == entry) {
            ASSERT(!newEntry);
            newEntry = reinsertedEntry;
        }
    }
    m_deletedCount = 0;

    return newEntry;
}
, typename Value
, typename Extractor
, typename HashFunctions
, typename Traits
, typename KeyTraits
, typename Allocator
>
1141 Value
* HashTable
<Key
, Value
, Extractor
, HashFunctions
, Traits
, KeyTraits
, Allocator
>::rehash(unsigned newTableSize
, Value
* entry
)
1143 unsigned oldTableSize
= m_tableSize
;
1144 ValueType
* oldTable
= m_table
;
1146 #if DUMP_HASHTABLE_STATS
1147 if (oldTableSize
!= 0)
1148 atomicIncrement(&HashTableStats::numRehashes
);
1151 #if DUMP_HASHTABLE_STATS_PER_TABLE
1152 if (oldTableSize
!= 0)
1153 ++m_stats
->numRehashes
;
1156 // The Allocator::isGarbageCollected check is not needed.
1157 // The check is just a static hint for a compiler to indicate that
1158 // Base::expandBuffer returns false if Allocator is a DefaultAllocator.
1159 if (Allocator::isGarbageCollected
&& newTableSize
> oldTableSize
) {
1161 Value
* newEntry
= expandBuffer(newTableSize
, entry
, success
);
1166 ValueType
* newTable
= allocateTable(newTableSize
);
1167 Value
* newEntry
= rehashTo(newTable
, newTableSize
, entry
);
1168 deleteAllBucketsAndDeallocate(oldTable
, oldTableSize
);
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::clear()
{
    registerModification();
    if (!m_table)
        return;

    deleteAllBucketsAndDeallocate(m_table, m_tableSize);
    m_table = nullptr;
    m_tableSize = 0;
    m_keyCount = 0;
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable(const HashTable& other)
    : m_table(nullptr)
    , m_tableSize(0)
    , m_keyCount(0)
    , m_deletedCount(0)
    , m_queueFlag(false)
#if ENABLE(ASSERT)
    , m_modifications(0)
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
    , m_stats(adoptPtr(new Stats(*other.m_stats)))
#endif
{
    // Copy the hash table the dumb way, by adding each element to the new table.
    // It might be more efficient to copy the table slots, but it's not clear that efficiency is needed.
    const_iterator end = other.end();
    for (const_iterator it = other.begin(); it != end; ++it)
        add(*it);
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::swap(HashTable& other)
{
    std::swap(m_table, other.m_table);
    std::swap(m_tableSize, other.m_tableSize);
    std::swap(m_keyCount, other.m_keyCount);
    // std::swap does not work for bit fields.
    unsigned deleted = m_deletedCount;
    m_deletedCount = other.m_deletedCount;
    other.m_deletedCount = deleted;
    ASSERT(!m_queueFlag);
    ASSERT(!other.m_queueFlag);

#if ENABLE(ASSERT)
    std::swap(m_modifications, other.m_modifications);
#endif

#if DUMP_HASHTABLE_STATS_PER_TABLE
    m_stats.swap(other.m_stats);
#endif
}
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::operator=(const HashTable& other)
{
    HashTable tmp(other);
    swap(tmp);
    return *this;
}
template<WeakHandlingFlag weakHandlingFlag, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper;
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
    static void process(typename Allocator::Visitor* visitor, void* closure) { }
    static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure) { }
    static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure) { }
};
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper<WeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
    // Used for purely weak and for weak-and-strong tables (ephemerons).
    static void process(typename Allocator::Visitor* visitor, void* closure)
    {
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        ASSERT(table->m_table);
        // Now perform weak processing (this is a no-op if the backing
        // was accessible through an iterator and was already marked
        // strongly).
        typedef typename HashTableType::ValueType ValueType;
        for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
            if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
                // At this stage calling trace can make no difference
                // (everything is already traced), but we use the
                // return value to remove things from the collection.

                // FIXME: This should be rewritten so that it can check
                // whether the element is dead without calling trace, which
                // is not semantically correct to call in the weak
                // processing stage.
                if (TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element)) {
                    table->registerModification();
                    HashTableType::deleteBucket(*element); // Also calls the destructor.
                    table->m_deletedCount++;
                    table->m_keyCount--;
                    // We don't rehash the backing until the next add
                    // or delete, because that would cause allocation
                    // during GC.
                }
            }
        }
    }

    // Called repeatedly for tables that have both weak and strong pointers.
    static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure)
    {
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        ASSERT(table->m_table);
        // Check the hash table for elements that we now know will not
        // be removed by weak processing. Those elements need to have
        // their strong pointers traced.
        typedef typename HashTableType::ValueType ValueType;
        for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
            if (!HashTableType::isEmptyOrDeletedBucket(*element))
                TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element);
        }
    }

    // Called when the ephemeron iteration is done and before running the per thread
    // weak processing. It is guaranteed to be called before any thread is resumed.
    static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure)
    {
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        ASSERT(Allocator::weakTableRegistered(visitor, table));
        table->clearEnqueued();
    }
};
template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template<typename VisitorDispatcher>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::trace(VisitorDispatcher visitor)
{
    // If someone else already marked the backing and queued up the trace
    // and/or weak callback then we are done. This optimization does not
    // happen for ListHashSet since its iterator does not point at the
    // backing.
    if (!m_table || Allocator::isHeapObjectAlive(m_table))
        return;

    // Normally, we mark the backing store without performing trace. This
    // means it is marked live, but the pointers inside it are not marked.
    // Instead we will mark the pointers below. However, for backing
    // stores that contain weak pointers the handling is rather different.
    // We don't mark the backing store here, so the marking GC will leave
    // the backing unmarked. If the backing is found in any other way than
    // through its HashTable (ie from an iterator) then the mark bit will
    // be set and the pointers will be marked strongly, avoiding problems
    // with iterating over things that disappear due to weak processing
    // while we are iterating over them. We register the backing store
    // pointer for delayed marking which will take place after we know if
    // the backing is reachable from elsewhere. We also register a
    // weakProcessing callback which will perform weak processing if needed.
    if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) {
        Allocator::markNoTracing(visitor, m_table);
    } else {
        Allocator::registerDelayedMarkNoTracing(visitor, m_table);
        // Since we're delaying marking this HashTable, it is possible
        // that the registerWeakMembers is called multiple times (in rare
        // cases). However, it shouldn't cause any issue.
        Allocator::registerWeakMembers(visitor, this, m_table, WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
    }
    if (ShouldBeTraced<Traits>::value) {
        if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
            // If we have both strong and weak pointers in the collection
            // then we queue up the collection for fixed point iteration a
            // la Ephemerons:
            // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
            // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
            ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
            if (!enqueued()) {
                Allocator::registerWeakTable(visitor, this,
                    WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIteration,
                    WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIterationDone);
                setEnqueued();
            }
            // We don't need to trace the elements here, since registering
            // as a weak table above will cause them to be traced (perhaps
            // several times). It's better to wait until everything else is
            // traced before tracing the elements for the first time; this
            // may reduce (by one) the number of iterations needed to get
            // to a fixed point.
            return;
        }
        for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; element--) {
            if (!isEmptyOrDeletedBucket(*element))
                Allocator::template trace<VisitorDispatcher, ValueType, Traits>(visitor, *element);
        }
    }
}
// iterator adapters

template<typename HashTableType, typename Traits> struct HashTableConstIteratorAdapter {
    HashTableConstIteratorAdapter() {}
    HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
    typedef typename Traits::IteratorConstGetType GetType;
    typedef typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;

    GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
    typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
    GetType operator->() const { return get(); }

    HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; }
    // postfix ++ intentionally omitted

    typename HashTableType::const_iterator m_impl;
};

template<typename HashTableType, typename Traits> struct HashTableIteratorAdapter {
    typedef typename Traits::IteratorGetType GetType;
    typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;

    HashTableIteratorAdapter() {}
    HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}

    GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
    typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
    GetType operator->() const { return get(); }

    HashTableIteratorAdapter& operator++() { ++m_impl; return *this; }
    // postfix ++ intentionally omitted

    operator HashTableConstIteratorAdapter<HashTableType, Traits>()
    {
        typename HashTableType::const_iterator i = m_impl;
        return i;
    }

    typename HashTableType::iterator m_impl;
};
template<typename T, typename U>
inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
{
    return a.m_impl == b.m_impl;
}

template<typename T, typename U>
inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
{
    return a.m_impl != b.m_impl;
}

template<typename T, typename U>
inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
{
    return a.m_impl == b.m_impl;
}

template<typename T, typename U>
inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
{
    return a.m_impl != b.m_impl;
}

// All 4 combinations of ==, != and Const, non const.
template<typename T, typename U>
inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
{
    return a.m_impl == b.m_impl;
}

template<typename T, typename U>
inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
{
    return a.m_impl != b.m_impl;
}

template<typename T, typename U>
inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
{
    return a.m_impl == b.m_impl;
}

template<typename T, typename U>
inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
{
    return a.m_impl != b.m_impl;
}
template<typename Collection1, typename Collection2>
inline void removeAll(Collection1& collection, const Collection2& toBeRemoved)
{
    if (collection.isEmpty() || toBeRemoved.isEmpty())
        return;
    typedef typename Collection2::const_iterator CollectionIterator;
    CollectionIterator end(toBeRemoved.end());
    for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it)
        collection.remove(*it);
}
} // namespace WTF

#include "wtf/HashIterators.h"

#endif // WTF_HashTable_h