// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: Craig Silverstein
//
// A sparse hashtable is a particular implementation of
// a hashtable: one that is meant to minimize memory use.
// It does this by using a *sparse table* (cf. sparsetable.h),
// which uses between 1 and 2 bits to store empty buckets
// (we may need another bit for hashtables that support deletion).
//
// When empty buckets are so cheap, an appealing hashtable
// implementation is internal probing, in which the hashtable
// is a single table, and collisions are resolved by trying
// to insert again in another bucket.  The most cache-efficient
// internal probing schemes are linear probing (which suffers,
// alas, from clumping) and quadratic probing, which is what
// we implement by default.
//
// Deleted buckets are a bit of a pain.  We have to somehow mark
// deleted buckets (the probing must distinguish them from empty
// buckets).  The most principled way is to have another bitmap,
// but that's annoying and takes up space.  Instead we let the
// user specify an "impossible" key.  We set deleted buckets
// to have the impossible key.
//
// Note that it is possible to change the value of the deleted key
// on the fly; you can even remove it, though after that point
// the hashtable is insert-only until you set it again.
//
// You probably shouldn't use this code directly.  Use
// <google/sparse_hash_map> or <google/sparse_hash_set> instead.
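//
// A minimal usage sketch of those public wrappers (illustration only; the
// exact header names and namespace depend on how the sparsehash package is
// set up in your tree):
//
//   #include <google/sparse_hash_set>
//   google::sparse_hash_set<int> s;
//   s.set_deleted_key(-1);   // an "impossible" key; required before erase()
//   s.insert(42);
//   s.erase(42);             // marks the bucket with the deleted key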
//
// You can change the following below:
// HT_OCCUPANCY_FLT      -- how full before we double size
// HT_EMPTY_FLT          -- how empty before we halve size
// HT_MIN_BUCKETS        -- smallest bucket size
//
// How to decide what values to use?
// HT_EMPTY_FLT's default of .4 * OCCUPANCY_FLT is probably good.
// HT_MIN_BUCKETS is probably unnecessary since you can specify
// (indirectly) the starting number of buckets at construct-time.
// For HT_OCCUPANCY_FLT, you can use this chart to trade off
// expected lookup time against the space taken up.  By default, this
// code uses quadratic probing, though you can change it to linear
// via JUMP_ below if you really want to.
//
// From http://www.augustana.ca/~mohrj/courses/1999.fall/csc210/lecture_notes/hashing.html
//   NUMBER OF PROBES / LOOKUP        Successful           Unsuccessful
//   Quadratic collision resolution   1 - ln(1-L) - L/2    1/(1-L) - L - ln(1-L)
//   Linear collision resolution      [1+1/(1-L)]/2        [1+1/((1-L)^2)]/2
//
//   -- HT_OCCUPANCY_FLT --           0.10  0.50  0.60  0.75  0.80  0.90  0.99
//   QUADRATIC COLLISION RES.
//      probes/successful lookup      1.05  1.44  1.62  2.01  2.21  2.85  5.11
//      probes/unsuccessful lookup    1.11  2.19  2.82  4.64  5.81  11.4  103.6
//   LINEAR COLLISION RES.
//      probes/successful lookup      1.06  1.5   1.75  2.5   3.0   5.5   50.5
//      probes/unsuccessful lookup    1.12  2.5   3.6   8.5   13.0  50.0  5000.0
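//
// As a rough worked example (not from the original docs): with the default
// HT_OCCUPANCY_FLT of 0.8, a table of 1024 buckets has an enlarge_threshold
// of static_cast<size_type>(1024 * 0.8) = 819, so once an insert would push
// the number of occupied buckets past 819 the table grows to 2048 buckets;
// near 0.8 occupancy the chart above predicts roughly 2.2 probes per
// successful lookup.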

#ifndef _SPARSEHASHTABLE_H_
#define _SPARSEHASHTABLE_H_

#ifndef SPARSEHASH_STAT_UPDATE
#define SPARSEHASH_STAT_UPDATE(x) ((void) 0)
#endif

// The probing method
// Linear probing
// #define JUMP_(key, num_probes)    ( 1 )
// Quadratic-ish probing
#define JUMP_(key, num_probes)    ( num_probes )
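// Note that neither JUMP_ variant above mentions its first argument in its
// expansion, so the `key` token passed at the call sites is discarded by the
// preprocessor and never evaluated.  With the quadratic-ish variant, the
// probe sequence for a key hashing to bucket h in a table of 2^k buckets is
// (illustration only, not code used by this header):
//
//   h, h+1, h+3, h+6, h+10, ...              // triangular-number offsets
//   // i.e. bucknum = (bucknum + num_probes) & (bucket_count() - 1)
//
// which visits every bucket when the table size is a power of two.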

// Hashtable class, used to implement the hashed associative containers
// hash_set and hash_map.

// !!! DR changed some <google/...> to "..." due to include path problems...
#include "google_config.h"

#include <assert.h>        // for assert()
#include <stdio.h>         // for FILE, used by read_metadata()/write_metadata()
#include <new>             // for placement new, used by set_value()/set_key()
#include <utility>         // for pair<>
#include <algorithm>       // for swap(), e.g.
#include "../sparsetable"  // since that's basically what we are

_START_GOOGLE_NAMESPACE_

using STL_NAMESPACE::pair;

// sparsetable uses malloc()/realloc()/free(), so Alloc is basically
// ignored.  We include it because we're being compatible.
// TODO(csilvers): is that the right thing to do?

template <class Value, class Key, class HashFcn,
          class ExtractKey, class EqualKey, class Alloc>
class sparse_hashtable;

template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_iterator;

template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_const_iterator;

// As far as iterating, we're basically just a sparsetable
// that skips over deleted elements.
template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_iterator {
 public:
  typedef sparse_hashtable<V,K,HF,ExK,EqK,A>                sparse_hashtable;
  typedef sparse_hashtable_iterator<V,K,HF,ExK,EqK,A>       iterator;
  typedef sparse_hashtable_const_iterator<V,K,HF,ExK,EqK,A> const_iterator;
  typedef typename sparsetable<V>::nonempty_iterator        st_iterator;

#ifdef UNDERSTANDS_ITERATOR_TAGS
  typedef STL_NAMESPACE::forward_iterator_tag iterator_category;
#endif
  typedef V value_type;
  typedef ptrdiff_t difference_type;
  typedef size_t size_type;
  typedef V& reference;                // Value
  typedef V* pointer;

  // "Real" constructor and default constructor
  sparse_hashtable_iterator(const sparse_hashtable *h,
                            st_iterator it, st_iterator it_end)
    : ht(h), pos(it), end(it_end)   { advance_past_deleted(); }
  sparse_hashtable_iterator() { }      // not ever used internally
  // The default destructor is fine; we don't define one
  // The default operator= is fine; we don't define one

  // Happy dereferencer
  reference operator*() const { return *pos; }
  pointer operator->() const  { return &(operator*()); }

  // Arithmetic.  The only hard part is making sure that
  // we're not on a marked-deleted array element
  void advance_past_deleted() {
    while ( pos != end && ht->test_deleted(*this) )
      ++pos;
  }
  iterator& operator++()   {
    assert(pos != end); ++pos; advance_past_deleted(); return *this;
  }
  iterator operator++(int) { iterator tmp(*this); ++*this; return tmp; }

  // Comparison
  bool operator==(const iterator& it) const { return pos == it.pos; }
  bool operator!=(const iterator& it) const { return pos != it.pos; }

  // The actual data
  const sparse_hashtable *ht;
  st_iterator pos, end;
};

// Now do it all again, but with const-ness!
template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_const_iterator {
 public:
  typedef sparse_hashtable<V,K,HF,ExK,EqK,A>                sparse_hashtable;
  typedef sparse_hashtable_iterator<V,K,HF,ExK,EqK,A>       iterator;
  typedef sparse_hashtable_const_iterator<V,K,HF,ExK,EqK,A> const_iterator;
  typedef typename sparsetable<V>::const_nonempty_iterator  st_iterator;

#ifdef UNDERSTANDS_ITERATOR_TAGS
  typedef STL_NAMESPACE::forward_iterator_tag iterator_category;
#endif
  typedef V value_type;
  typedef ptrdiff_t difference_type;
  typedef size_t size_type;
  typedef const V& reference;          // Value
  typedef const V* pointer;

  // "Real" constructor and default constructor
  sparse_hashtable_const_iterator(const sparse_hashtable *h,
                                  st_iterator it, st_iterator it_end)
    : ht(h), pos(it), end(it_end)   { advance_past_deleted(); }
  // This lets us convert regular iterators to const iterators
  sparse_hashtable_const_iterator() { }    // never used internally
  sparse_hashtable_const_iterator(const iterator &it)
    : ht(it.ht), pos(it.pos), end(it.end) { }
  // The default destructor is fine; we don't define one
  // The default operator= is fine; we don't define one

  // Happy dereferencer
  reference operator*() const { return *pos; }
  pointer operator->() const  { return &(operator*()); }

  // Arithmetic.  The only hard part is making sure that
  // we're not on a marked-deleted array element
  void advance_past_deleted() {
    while ( pos != end && ht->test_deleted(*this) )
      ++pos;
  }
  const_iterator& operator++()   {
    assert(pos != end); ++pos; advance_past_deleted(); return *this;
  }
  const_iterator operator++(int) { const_iterator tmp(*this); ++*this; return tmp; }

  // Comparison
  bool operator==(const const_iterator& it) const { return pos == it.pos; }
  bool operator!=(const const_iterator& it) const { return pos != it.pos; }

  // The actual data
  const sparse_hashtable *ht;
  st_iterator pos, end;
};

// And once again, but this time freeing up memory as we iterate
template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_destructive_iterator {
 public:
  typedef sparse_hashtable<V,K,HF,ExK,EqK,A>                      sparse_hashtable;
  typedef sparse_hashtable_destructive_iterator<V,K,HF,ExK,EqK,A> iterator;
  typedef typename sparsetable<V>::destructive_iterator           st_iterator;

#ifdef UNDERSTANDS_ITERATOR_TAGS
  typedef STL_NAMESPACE::forward_iterator_tag iterator_category;
#endif
  typedef V value_type;
  typedef ptrdiff_t difference_type;
  typedef size_t size_type;
  typedef V& reference;                // Value
  typedef V* pointer;

  // "Real" constructor and default constructor
  sparse_hashtable_destructive_iterator(const sparse_hashtable *h,
                                        st_iterator it, st_iterator it_end)
    : ht(h), pos(it), end(it_end)   { advance_past_deleted(); }
  sparse_hashtable_destructive_iterator() { }    // never used internally
  // The default destructor is fine; we don't define one
  // The default operator= is fine; we don't define one

  // Happy dereferencer
  reference operator*() const { return *pos; }
  pointer operator->() const  { return &(operator*()); }

  // Arithmetic.  The only hard part is making sure that
  // we're not on a marked-deleted array element
  void advance_past_deleted() {
    while ( pos != end && ht->test_deleted(*this) )
      ++pos;
  }
  iterator& operator++()   {
    assert(pos != end); ++pos; advance_past_deleted(); return *this;
  }
  iterator operator++(int) { iterator tmp(*this); ++*this; return tmp; }

  // Comparison
  bool operator==(const iterator& it) const { return pos == it.pos; }
  bool operator!=(const iterator& it) const { return pos != it.pos; }

  // The actual data
  const sparse_hashtable *ht;
  st_iterator pos, end;
};

template <class Value, class Key, class HashFcn,
          class ExtractKey, class EqualKey, class Alloc>
class sparse_hashtable {
 public:
  typedef Key key_type;
  typedef Value value_type;
  typedef HashFcn hasher;
  typedef EqualKey key_equal;

  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
  typedef value_type* pointer;
  typedef const value_type* const_pointer;
  typedef value_type& reference;
  typedef const value_type& const_reference;
  typedef sparse_hashtable_iterator<Value, Key, HashFcn,
                                    ExtractKey, EqualKey, Alloc>
  iterator;

  typedef sparse_hashtable_const_iterator<Value, Key, HashFcn,
                                          ExtractKey, EqualKey, Alloc>
  const_iterator;

  typedef sparse_hashtable_destructive_iterator<Value, Key, HashFcn,
                                                ExtractKey, EqualKey, Alloc>
  destructive_iterator;

  // How full we let the table get before we resize.  Knuth says .8 is
  // good -- higher causes us to probe too much, though it saves memory
  static const float HT_OCCUPANCY_FLT;   // = 0.8f;

  // How empty we let the table get before we resize lower.
  // It should be less than OCCUPANCY_FLT / 2 or we thrash resizing
  static const float HT_EMPTY_FLT;       // = 0.4 * HT_OCCUPANCY_FLT;

  // Minimum size we're willing to let hashtables be.
  // Must be a power of two, and at least 4.
  // Note, however, that for a given hashtable, the minimum size is
  // determined by the first constructor arg, and may be >HT_MIN_BUCKETS.
  static const size_t HT_MIN_BUCKETS = 32;

  // ITERATOR FUNCTIONS
  iterator begin()             { return iterator(this, table.nonempty_begin(),
                                                 table.nonempty_end()); }
  iterator end()               { return iterator(this, table.nonempty_end(),
                                                 table.nonempty_end()); }
  const_iterator begin() const { return const_iterator(this,
                                                       table.nonempty_begin(),
                                                       table.nonempty_end()); }
  const_iterator end() const   { return const_iterator(this,
                                                       table.nonempty_end(),
                                                       table.nonempty_end()); }

  // This is used when resizing
  destructive_iterator destructive_begin() {
    return destructive_iterator(this, table.destructive_begin(),
                                table.destructive_end());
  }
  destructive_iterator destructive_end() {
    return destructive_iterator(this, table.destructive_end(),
                                table.destructive_end());
  }

  // ACCESSOR FUNCTIONS for the things we templatize on, basically
  hasher hash_funct() const { return hash; }
  key_equal key_eq() const  { return equals; }

  // Annoyingly, we can't copy values around, because they might have
  // const components (they're probably pair<const X, Y>).  We use
  // placement new to get around this.  Arg.
  void set_value(value_type* dst, const value_type src) {
    new(dst) value_type(src);
  }
  void set_key(key_type* dst, const key_type src) {
    new(dst) key_type(src);              // used for set_deleted_key(), etc.
  }

  // This is used as a tag for the copy constructor, saying to destroy its
  // arg.  We have two ways of destructively copying: with potentially growing
  // the hashtable as we copy, and without.  To make sure the outside world
  // can't do a destructive copy, we make the typename private.
  enum MoveDontCopyT {MoveDontCopy, MoveDontGrow};

  // DELETE HELPER FUNCTIONS
  // This lets the user describe a key that will indicate deleted
  // table entries.  This key should be an "impossible" entry --
  // if you try to insert it for real, you won't be able to retrieve it!
  // (NB: while you pass in an entire value, only the key part is looked
  // at.  This is just because I don't know how to assign just a key.)
  void squash_deleted() {           // gets rid of any deleted entries we have
    if ( num_deleted ) {            // get rid of deleted before writing
      sparse_hashtable tmp(MoveDontGrow, *this);
      swap(tmp);                    // now we are tmp
    }
    assert(num_deleted == 0);
  }

  void set_deleted_key(const key_type &key) {
    // It's only safe to change what "deleted" means if we purge deleted guys
    squash_deleted();
    use_deleted = true;
    set_key(&delkey, key);          // save the key
  }
  void clear_deleted_key() {
    squash_deleted();
    use_deleted = false;
  }

  // These are public so the iterators can use them
  // True if the item at position bucknum is the "deleted" marker
  bool test_deleted(size_type bucknum) const {
    // The num_deleted test is crucial for read(): after read(), the ht values
    // are garbage, and we don't want to think some of them are deleted.
    return (use_deleted && num_deleted > 0 && table.test(bucknum) &&
            equals(delkey, get_key(table.get(bucknum))));
  }
  bool test_deleted(const iterator &it) const {
    return (use_deleted && num_deleted > 0 &&
            equals(delkey, get_key(*it)));
  }
  bool test_deleted(const const_iterator &it) const {
    return (use_deleted && num_deleted > 0 &&
            equals(delkey, get_key(*it)));
  }
  bool test_deleted(const destructive_iterator &it) const {
    return (use_deleted && num_deleted > 0 &&
            equals(delkey, get_key(*it)));
  }

  // Set it so test_deleted is true.  true if object didn't use to be deleted
  // See below (at erase()) to explain why we allow const_iterators
  bool set_deleted(const_iterator &it) {
    assert(use_deleted);             // bad if set_deleted_key() wasn't called
    bool retval = !test_deleted(it);
    // &* converts from iterator to value-type
    set_key(const_cast<key_type*>(&get_key(*it)), delkey);
    return retval;
  }
  // Set it so test_deleted is false.  true if object used to be deleted
  bool clear_deleted(const_iterator &it) {
    assert(use_deleted);             // bad if set_deleted_key() wasn't called
    // happens automatically when we assign something else in its place
    return test_deleted(it);
  }

  // FUNCTIONS CONCERNING SIZE
  size_type size() const      { return table.num_nonempty() - num_deleted; }
  // Buckets are always a power of 2
  size_type max_size() const  { return (size_type(-1) >> 1U) + 1; }
  bool empty() const          { return size() == 0; }
  size_type bucket_count() const      { return table.size(); }
  size_type max_bucket_count() const  { return max_size(); }

  // Because of the above, size_type(-1) is never legal; use it for errors
  static const size_type ILLEGAL_BUCKET = size_type(-1);

  // This is the smallest size a hashtable can be without being too crowded
  // If you like, you can give a min #buckets as well as a min #elts
  size_type min_size(size_type num_elts, size_type min_buckets_wanted) {
    size_type sz = HT_MIN_BUCKETS;
    while ( sz < min_buckets_wanted || num_elts >= sz * HT_OCCUPANCY_FLT )
      sz *= 2;                       // stay a power of 2
    return sz;
  }

  // Used after a string of deletes
  void maybe_shrink() {
    assert(table.num_nonempty() >= num_deleted);
    assert((bucket_count() & (bucket_count()-1)) == 0);    // is a power of two
    assert(bucket_count() >= HT_MIN_BUCKETS);

    if ( (table.num_nonempty()-num_deleted) <= shrink_threshold &&
         bucket_count() > HT_MIN_BUCKETS ) {
      size_type sz = bucket_count() / 2;    // find how much we should shrink
      while ( sz > HT_MIN_BUCKETS &&
              (table.num_nonempty() - num_deleted) <= sz * HT_EMPTY_FLT )
        sz /= 2;                            // stay a power of 2
      sparse_hashtable tmp(MoveDontCopy, *this, sz);
      swap(tmp);                            // now we are tmp
    }
    consider_shrink = false;                // because we just considered it
  }

  // We'll let you resize a hashtable -- though this makes us copy all!
  // When you resize, you say, "make it big enough for this many more elements"
  void resize_delta(size_type delta, size_type min_buckets_wanted = 0) {
    if ( consider_shrink )                  // see if lots of deletes happened
      maybe_shrink();
    if ( bucket_count() > min_buckets_wanted &&
         (table.num_nonempty() + delta) <= enlarge_threshold )
      return;                               // we're ok as we are

    const size_type resize_to = min_size(table.num_nonempty() + delta,
                                         min_buckets_wanted);
    if ( resize_to > bucket_count() ) {     // we don't have enough buckets
      sparse_hashtable tmp(MoveDontCopy, *this, resize_to);
      swap(tmp);                            // now we are tmp
    }
  }

  // Used to actually do the rehashing when we grow/shrink a hashtable
  void copy_from(const sparse_hashtable &ht, size_type min_buckets_wanted = 0) {
    clear();            // clear table, set num_deleted to 0

    // If we need to change the size of our table, do it now
    const size_type resize_to = min_size(ht.size(), min_buckets_wanted);
    if ( resize_to > bucket_count() ) {     // we don't have enough buckets
      table.resize(resize_to);              // sets the number of buckets
      reset_thresholds();
    }

    // We use a normal iterator to get non-deleted bcks from ht
    // We could use insert() here, but since we know there are
    // no duplicates and no deleted items, we can be more efficient
    assert( (bucket_count() & (bucket_count()-1)) == 0);     // a power of two
    for ( const_iterator it = ht.begin(); it != ht.end(); ++it ) {
      size_type num_probes = 0;             // how many times we've probed
      size_type bucknum;
      const size_type bucket_count_minus_one = bucket_count() - 1;
      for (bucknum = hash(get_key(*it)) & bucket_count_minus_one;
           table.test(bucknum);                                // not empty
           bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one) {
        ++num_probes;
        assert(num_probes < bucket_count()); // or else the hashtable is full
      }
      table.set(bucknum, *it);              // copies the value to here
    }
  }

  // Implementation is like copy_from, but it destroys the table of the
  // "from" guy by freeing sparsetable memory as we iterate.  This is
  // useful in resizing, since we're throwing away the "from" guy anyway.
  void move_from(MoveDontCopyT mover, sparse_hashtable &ht,
                 size_type min_buckets_wanted = 0) {
    clear();            // clear table, set num_deleted to 0

    // If we need to change the size of our table, do it now
    size_type resize_to;
    if ( mover == MoveDontGrow )
      resize_to = ht.bucket_count();        // keep same size as old ht
    else
      resize_to = min_size(ht.size(), min_buckets_wanted);
    if ( resize_to > bucket_count() ) {     // we don't have enough buckets
      table.resize(resize_to);              // sets the number of buckets
      reset_thresholds();
    }

    // We use a normal iterator to get non-deleted bcks from ht
    // We could use insert() here, but since we know there are
    // no duplicates and no deleted items, we can be more efficient
    assert( (bucket_count() & (bucket_count()-1)) == 0);     // a power of two
    // THIS IS THE MAJOR LINE THAT DIFFERS FROM COPY_FROM():
    for ( destructive_iterator it = ht.destructive_begin();
          it != ht.destructive_end(); ++it ) {
      size_type num_probes = 0;             // how many times we've probed
      size_type bucknum;
      for ( bucknum = hash(get_key(*it)) & (bucket_count()-1);  // h % buck_cnt
            table.test(bucknum);                                // not empty
            bucknum = (bucknum + JUMP_(key, num_probes)) & (bucket_count()-1) ) {
        ++num_probes;
        assert(num_probes < bucket_count()); // or else the hashtable is full
      }
      table.set(bucknum, *it);              // copies the value to here
    }
  }

  // Required by the spec for hashed associative container
  // Though the docs say this should be num_buckets, I think it's much
  // more useful as num_elements.  As a special feature, calling with
  // req_elements==0 will cause us to shrink if we can, saving space.
  void resize(size_type req_elements) {     // resize to this or larger
    if ( consider_shrink || req_elements == 0 )
      maybe_shrink();
    if ( req_elements > table.num_nonempty() )   // we only grow
      resize_delta(req_elements - table.num_nonempty(), 0);
  }

  // CONSTRUCTORS -- as required by the specs, we take a size,
  // but also let you specify a hashfunction, key comparator,
  // and key extractor.  We also define a copy constructor and =.
  // DESTRUCTOR -- the default is fine, surprisingly.
  explicit sparse_hashtable(size_type n = 0,
                            const HashFcn& hf = HashFcn(),
                            const EqualKey& eql = EqualKey(),
                            const ExtractKey& ext = ExtractKey())
    : hash(hf), equals(eql), get_key(ext), num_deleted(0),
      use_deleted(false), delkey(), table(min_size(0, n)) {   // start small
    reset_thresholds();
  }

  // As a convenience for resize(), we allow an optional second argument
  // which lets you make this new hashtable a different size than ht.
  // We also provide a mechanism of saying you want to "move" the ht argument
  // into us instead of copying.
  sparse_hashtable(const sparse_hashtable& ht, size_type min_buckets_wanted = 0)
    : hash(ht.hash), equals(ht.equals), get_key(ht.get_key),
      num_deleted(0), use_deleted(ht.use_deleted), delkey(ht.delkey), table() {
    reset_thresholds();
    copy_from(ht, min_buckets_wanted);   // copy_from() ignores deleted entries
  }
  sparse_hashtable(MoveDontCopyT mover, sparse_hashtable& ht,
                   size_type min_buckets_wanted = 0)
    : hash(ht.hash), equals(ht.equals), get_key(ht.get_key),
      num_deleted(0), use_deleted(ht.use_deleted), delkey(ht.delkey), table() {
    reset_thresholds();
    move_from(mover, ht, min_buckets_wanted);    // ignores deleted entries
  }

  sparse_hashtable& operator= (const sparse_hashtable& ht) {
    if (&ht == this)  return *this;      // don't copy onto ourselves
    hash = ht.hash;
    equals = ht.equals;
    get_key = ht.get_key;
    use_deleted = ht.use_deleted;
    set_key(&delkey, ht.delkey);
    copy_from(ht);                       // sets num_deleted to 0 too
    return *this;
  }

  // Many STL algorithms use swap instead of copy constructors
  void swap(sparse_hashtable& ht) {
    STL_NAMESPACE::swap(hash, ht.hash);
    STL_NAMESPACE::swap(equals, ht.equals);
    STL_NAMESPACE::swap(get_key, ht.get_key);
    STL_NAMESPACE::swap(num_deleted, ht.num_deleted);
    STL_NAMESPACE::swap(use_deleted, ht.use_deleted);
    { key_type tmp;                // for annoying reasons, swap() doesn't work
      set_key(&tmp, delkey);
      set_key(&delkey, ht.delkey);
      set_key(&ht.delkey, tmp);
    }
    table.swap(ht.table);
    reset_thresholds();
    ht.reset_thresholds();
  }

  // It's always nice to be able to clear a table without deallocating it
  void clear() {
    table.clear();
    num_deleted = 0;
    reset_thresholds();
  }

  // LOOKUP ROUTINES
  // Returns a pair of positions: 1st where the object is, 2nd where
  // it would go if you wanted to insert it.  1st is ILLEGAL_BUCKET
  // if object is not found; 2nd is ILLEGAL_BUCKET if it is.
  // Note: because of deletions where-to-insert is not trivial: it's the
  // first deleted bucket we see, as long as we don't find the key later
  pair<size_type, size_type> find_position(const key_type &key) const {
    size_type num_probes = 0;              // how many times we've probed
    const size_type bucket_count_minus_one = bucket_count() - 1;
    size_type bucknum = hash(key) & bucket_count_minus_one;
    size_type insert_pos = ILLEGAL_BUCKET; // where we would insert
    SPARSEHASH_STAT_UPDATE(total_lookups += 1);
    while ( 1 ) {                          // probe until something happens
      if ( !table.test(bucknum) ) {        // bucket is empty
        SPARSEHASH_STAT_UPDATE(total_probes += num_probes);
        if ( insert_pos == ILLEGAL_BUCKET )  // found no prior place to insert
          return pair<size_type,size_type>(ILLEGAL_BUCKET, bucknum);
        else
          return pair<size_type,size_type>(ILLEGAL_BUCKET, insert_pos);

      } else if ( test_deleted(bucknum) ) {  // keep searching, but mark to insert
        if ( insert_pos == ILLEGAL_BUCKET )
          insert_pos = bucknum;

      } else if ( equals(key, get_key(table.get(bucknum))) ) {
        SPARSEHASH_STAT_UPDATE(total_probes += num_probes);
        return pair<size_type,size_type>(bucknum, ILLEGAL_BUCKET);
      }
      ++num_probes;                        // we're doing another probe
      bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one;
      assert(num_probes < bucket_count()); // don't probe too many times!
    }
  }
find(const key_type
& key
) {
682 if ( size() == 0 ) return end();
683 pair
<size_type
, size_type
> pos
= find_position(key
);
684 if ( pos
.first
== ILLEGAL_BUCKET
) // alas, not there
687 return iterator(this, table
.get_iter(pos
.first
), table
.nonempty_end());
690 const_iterator
find(const key_type
& key
) const {
691 if ( size() == 0 ) return end();
692 pair
<size_type
, size_type
> pos
= find_position(key
);
693 if ( pos
.first
== ILLEGAL_BUCKET
) // alas, not there
696 return const_iterator(this,
697 table
.get_iter(pos
.first
), table
.nonempty_end());

  // Counts how many elements have key key.  For maps, it's either 0 or 1.
  size_type count(const key_type &key) const {
    pair<size_type, size_type> pos = find_position(key);
    return pos.first == ILLEGAL_BUCKET ? 0 : 1;
  }

  // Likewise, equal_range doesn't really make sense for us.  Oh well.
  pair<iterator,iterator> equal_range(const key_type& key) {
    const iterator pos = find(key);            // either an iterator or end
    return pair<iterator,iterator>(pos, pos);
  }
  pair<const_iterator,const_iterator> equal_range(const key_type& key) const {
    const const_iterator pos = find(key);      // either an iterator or end
    return pair<const_iterator,const_iterator>(pos, pos);
  }

  // INSERTION ROUTINES

  // If you know *this is big enough to hold obj, use this routine
  pair<iterator, bool> insert_noresize(const value_type& obj) {
    const pair<size_type,size_type> pos = find_position(get_key(obj));
    if ( pos.first != ILLEGAL_BUCKET) {      // object was already there
      return pair<iterator,bool>(iterator(this, table.get_iter(pos.first),
                                          table.nonempty_end()),
                                 false);     // false: we didn't insert
    } else {                                 // pos.second says where to put it
      if ( test_deleted(pos.second) ) {      // just replace if it's been del.
        // The set() below will undelete this object.  We just worry about stats
        assert(num_deleted > 0);
        --num_deleted;                       // used to be, now it isn't
      }
      table.set(pos.second, obj);
      return pair<iterator,bool>(iterator(this, table.get_iter(pos.second),
                                          table.nonempty_end()),
                                 true);      // true: we did insert
    }
  }

  // This is the normal insert routine, used by the outside world
  pair<iterator, bool> insert(const value_type& obj) {
    resize_delta(1);                      // adding an object, grow if need be
    return insert_noresize(obj);
  }

#ifdef UNDERSTANDS_ITERATOR_TAGS
  // When inserting a lot at a time, we specialize on the type of iterator
  template <class InputIterator>
  void insert(InputIterator f, InputIterator l) {
    // specializes on iterator type
    insert(f, l,
           typename STL_NAMESPACE::iterator_traits<InputIterator>::iterator_category());
  }

  // Iterator supports operator-, resize before inserting
  template <class ForwardIterator>
  void insert(ForwardIterator f, ForwardIterator l,
              STL_NAMESPACE::forward_iterator_tag) {
    size_type n = STL_NAMESPACE::distance(f, l);   // TODO(csilvers): standard?
    resize_delta(n);
    for ( ; n > 0; --n, ++f)
      insert_noresize(*f);
  }

  // Arbitrary iterator, can't tell how much to resize
  template <class InputIterator>
  void insert(InputIterator f, InputIterator l,
              STL_NAMESPACE::input_iterator_tag) {
    for ( ; f != l; ++f)
      insert(*f);
  }
#else
  template <class InputIterator>
  void insert(InputIterator f, InputIterator l) {
    for ( ; f != l; ++f)
      insert(*f);
  }
#endif

  // DELETION ROUTINES
  size_type erase(const key_type& key) {
    const_iterator pos = find(key);    // shrug: shouldn't need to be const
    if ( pos != end() ) {
      assert(!test_deleted(pos));      // or find() shouldn't have returned it
      set_deleted(pos);
      ++num_deleted;
      consider_shrink = true;     // will think about shrink after next insert
      return 1;                   // because we deleted one thing
    } else {
      return 0;                   // because we deleted nothing
    }
  }

  // This is really evil: really it should be iterator, not const_iterator.
  // But...the only reason keys are const is to allow lookup.
  // Since that's a moot issue for deleted keys, we allow const_iterators
  void erase(const_iterator pos) {
    if ( pos == end() ) return;    // sanity check
    if ( set_deleted(pos) ) {      // true if object has been newly deleted
      ++num_deleted;
      consider_shrink = true;     // will think about shrink after next insert
    }
  }

  void erase(const_iterator f, const_iterator l) {
    for ( ; f != l; ++f) {
      if ( set_deleted(f) )        // should always be true
        ++num_deleted;
    }
    consider_shrink = true;       // will think about shrink after next insert
  }

  // COMPARISON
  bool operator==(const sparse_hashtable& ht) const {
    // We really want to check that the hash functions are the same
    // but alas there's no way to do this.  We just hope.
    return ( num_deleted == ht.num_deleted && table == ht.table );
  }
  bool operator!=(const sparse_hashtable& ht) const {
    return !(*this == ht);
  }

  // I/O
  // We support reading and writing hashtables to disk.  NOTE that
  // this only stores the hashtable metadata, not the stuff you've
  // actually put in the hashtable!  Alas, since I don't know how to
  // write a hasher or key_equal, you have to make sure everything
  // but the table is the same.  We compact before writing.
  bool write_metadata(FILE *fp) {
    squash_deleted();           // so we don't have to worry about delkey
    return table.write_metadata(fp);
  }

  bool read_metadata(FILE *fp) {
    num_deleted = 0;            // since we got rid before writing
    bool result = table.read_metadata(fp);
    reset_thresholds();
    return result;
  }

  // Only meaningful if value_type is a POD.
  bool write_nopointer_data(FILE *fp) {
    return table.write_nopointer_data(fp);
  }

  bool read_nopointer_data(FILE *fp) {
    return table.read_nopointer_data(fp);
  }

 private:
  // The actual data
  hasher hash;                  // required by hashed_associative_container
  key_equal equals;
  ExtractKey get_key;
  size_type num_deleted;        // how many occupied buckets are marked deleted
  bool use_deleted;             // false until delkey has been set
  key_type delkey;              // which key marks deleted entries
  sparsetable<value_type> table;    // holds num_buckets and num_elements too
  size_type shrink_threshold;       // table.size() * HT_EMPTY_FLT
  size_type enlarge_threshold;      // table.size() * HT_OCCUPANCY_FLT
  bool consider_shrink;   // true if we should try to shrink before next insert

  void reset_thresholds() {
    enlarge_threshold = static_cast<size_type>(table.size()*HT_OCCUPANCY_FLT);
    shrink_threshold = static_cast<size_type>(table.size()*HT_EMPTY_FLT);
    consider_shrink = false;  // whatever caused us to reset already considered
  }
};

// We need a global swap as well
template <class V, class K, class HF, class ExK, class EqK, class A>
inline void swap(sparse_hashtable<V,K,HF,ExK,EqK,A> &x,
                 sparse_hashtable<V,K,HF,ExK,EqK,A> &y) {
  x.swap(y);
}

template <class V, class K, class HF, class ExK, class EqK, class A>
const typename sparse_hashtable<V,K,HF,ExK,EqK,A>::size_type
  sparse_hashtable<V,K,HF,ExK,EqK,A>::ILLEGAL_BUCKET;

// How full we let the table get before we resize.  Knuth says .8 is
// good -- higher causes us to probe too much, though it saves memory
template <class V, class K, class HF, class ExK, class EqK, class A>
const float sparse_hashtable<V,K,HF,ExK,EqK,A>::HT_OCCUPANCY_FLT = 0.8f;

// How empty we let the table get before we resize lower.
// It should be less than OCCUPANCY_FLT / 2 or we thrash resizing
template <class V, class K, class HF, class ExK, class EqK, class A>
const float sparse_hashtable<V,K,HF,ExK,EqK,A>::HT_EMPTY_FLT = 0.4 *
sparse_hashtable<V,K,HF,ExK,EqK,A>::HT_OCCUPANCY_FLT;

_END_GOOGLE_NAMESPACE_

#endif /* _SPARSEHASHTABLE_H_ */