// Internal header for TR1 unordered_set and unordered_map -*- C++ -*-

// Copyright (C) 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
/** @file
 *  This is a TR1 C++ Library header.
 */
// This header file defines std::tr1::hashtable, which is used to
// implement std::tr1::unordered_set, std::tr1::unordered_map,
// std::tr1::unordered_multiset, and std::tr1::unordered_multimap.
// hashtable has many template parameters, partly to accommodate
// the differences between those four classes and partly to
// accommodate policy choices that go beyond what TR1 calls for.

// ??? Arguably this should be Internal::hashtable, not std::tr1::hashtable.

// Class template hashtable attempts to encapsulate all reasonable
// variation among hash tables that use chaining.  It does not handle
// open addressing.

// References:
// M. Austern, "A Proposal to Add Hash Tables to the Standard
//    Library (revision 4)," WG21 Document N1456=03-0039, 2003.
// D. E. Knuth, The Art of Computer Programming, v. 3, Sorting and Searching.
// A. Tavori and V. Dreizin, "Generic Associative Containers", 2004.
#ifndef GNU_LIBSTDCXX_TR1_HASHTABLE_
#define GNU_LIBSTDCXX_TR1_HASHTABLE_

#include <utility>            // For std::pair
#include <iterator>           // For std::iterator_traits, std::distance
#include <algorithm>          // For std::lower_bound, std::max, std::fill
#include <cstddef>            // For std::size_t, std::ptrdiff_t
#include <cmath>              // For std::ceil
#include <bits/functexcept.h>
#include <tr1/type_traits>    // For true_type and false_type
//----------------------------------------------------------------------
// General utilities.

namespace Internal
{
  // Compile-time selection between two types: IF<flag, A, B>::type is A
  // when flag is true and B otherwise.
  template<bool Flag, typename IfTrue, typename IfFalse>
    struct IF;

  template<typename IfTrue, typename IfFalse>
    struct IF<true, IfTrue, IfFalse>
    { typedef IfTrue type; };

  template<typename IfTrue, typename IfFalse>
    struct IF<false, IfTrue, IfFalse>
    { typedef IfFalse type; };
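
  // Illustrative sketch (not part of this header): IF performs the same
  // compile-time type selection as the later std::conditional.
  //
  //   Internal::IF<true,  int, long>::type a;   // a has type int
  //   Internal::IF<false, int, long>::type b;   // b has type long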

  // Helper function: return distance(first, last) for forward
  // iterators, or 0 for input iterators.
  template<class Iterator>
    inline typename std::iterator_traits<Iterator>::difference_type
    distance_fw(Iterator first, Iterator last, std::input_iterator_tag)
    { return 0; }

  template<class Iterator>
    inline typename std::iterator_traits<Iterator>::difference_type
    distance_fw(Iterator first, Iterator last, std::forward_iterator_tag)
    { return std::distance(first, last); }

  template<class Iterator>
    inline typename std::iterator_traits<Iterator>::difference_type
    distance_fw(Iterator first, Iterator last)
    {
      typedef typename std::iterator_traits<Iterator>::iterator_category tag;
      return distance_fw(first, last, tag());
    }

} // namespace Internal
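
// Illustrative sketch (not part of this header): distance_fw lets the
// container pre-compute how many elements a range insert will add without
// consuming single-pass input iterators.
//
//   std::vector<int> v(10, 0);
//   Internal::distance_fw(v.begin(), v.end());  // 10 (forward iterators)
//
//   std::istream_iterator<int> in(std::cin), eof;
//   Internal::distance_fw(in, eof);             // 0  (input iterators)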

//----------------------------------------------------------------------
// Auxiliary types used for all instantiations of hashtable: nodes
// and iterators.

// Nodes, used to wrap elements stored in the hash table.  A policy
// template parameter of class template hashtable controls whether
// nodes also store a hash code.  In some cases (e.g. strings) this may
// be a performance win.

namespace Internal
{
  template<typename Value, bool cache_hash_code>
    struct hash_node;

  template<typename Value>
    struct hash_node<Value, true>
    {
      Value m_v;
      std::size_t hash_code;
      hash_node* m_next;
    };

  template<typename Value>
    struct hash_node<Value, false>
    {
      Value m_v;
      hash_node* m_next;
    };
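
  // Illustrative sketch (not part of this header): with cache_hash_code set,
  // every node carries the hash of its element, so lookups on expensive keys
  // (e.g. long strings) can reject most non-matching nodes by comparing the
  // cached codes before calling the equality predicate.
  //
  //   typedef hash_node<std::string, true>  caching_node;  // m_v, hash_code, m_next
  //   typedef hash_node<std::string, false> plain_node;    // m_v, m_next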

  // Local iterators, used to iterate within a bucket but not between
  // buckets.
  template<typename Value, bool cache>
    struct node_iterator_base
    {
      node_iterator_base(hash_node<Value, cache>* p)
      : m_cur(p) { }

      void
      incr()
      { m_cur = m_cur->m_next; }

      hash_node<Value, cache>* m_cur;
    };

  template<typename Value, bool cache>
    inline bool
    operator==(const node_iterator_base<Value, cache>& x,
               const node_iterator_base<Value, cache>& y)
    { return x.m_cur == y.m_cur; }

  template<typename Value, bool cache>
    inline bool
    operator!=(const node_iterator_base<Value, cache>& x,
               const node_iterator_base<Value, cache>& y)
    { return x.m_cur != y.m_cur; }

  template<typename Value, bool constant_iterators, bool cache>
    struct node_iterator
    : public node_iterator_base<Value, cache>
    {
      typedef Value value_type;
      typedef typename IF<constant_iterators, const Value*, Value*>::type
        pointer;
      typedef typename IF<constant_iterators, const Value&, Value&>::type
        reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      node_iterator()
      : node_iterator_base<Value, cache>(0) { }

      explicit
      node_iterator(hash_node<Value, cache>* p)
      : node_iterator_base<Value, cache>(p) { }

      reference
      operator*() const
      { return this->m_cur->m_v; }

      pointer
      operator->() const
      { return &this->m_cur->m_v; }

      node_iterator&
      operator++()
      {
        this->incr();
        return *this;
      }

      node_iterator
      operator++(int)
      {
        node_iterator tmp(*this);
        this->incr();
        return tmp;
      }
    };

  template<typename Value, bool constant_iterators, bool cache>
    struct node_const_iterator
    : public node_iterator_base<Value, cache>
    {
      typedef Value value_type;
      typedef const Value* pointer;
      typedef const Value& reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      node_const_iterator()
      : node_iterator_base<Value, cache>(0) { }

      explicit
      node_const_iterator(hash_node<Value, cache>* p)
      : node_iterator_base<Value, cache>(p) { }

      node_const_iterator(const node_iterator<Value, constant_iterators,
                          cache>& x)
      : node_iterator_base<Value, cache>(x.m_cur) { }

      reference
      operator*() const
      { return this->m_cur->m_v; }

      pointer
      operator->() const
      { return &this->m_cur->m_v; }

      node_const_iterator&
      operator++()
      {
        this->incr();
        return *this;
      }

      node_const_iterator
      operator++(int)
      {
        node_const_iterator tmp(*this);
        this->incr();
        return tmp;
      }
    };

  template<typename Value, bool cache>
    struct hashtable_iterator_base
    {
      hashtable_iterator_base(hash_node<Value, cache>* node,
                              hash_node<Value, cache>** bucket)
      : m_cur_node(node), m_cur_bucket(bucket) { }

      void
      incr()
      {
        m_cur_node = m_cur_node->m_next;
        if (!m_cur_node)
          m_incr_bucket();
      }

      void
      m_incr_bucket();

      hash_node<Value, cache>* m_cur_node;
      hash_node<Value, cache>** m_cur_bucket;
    };

  // Global iterators, used for arbitrary iteration within a hash
  // table.  Larger and more expensive than local iterators.
  template<typename Value, bool cache>
    void
    hashtable_iterator_base<Value, cache>::
    m_incr_bucket()
    {
      ++m_cur_bucket;

      // This loop requires the bucket array to have a non-null sentinel.
      while (!*m_cur_bucket)
        ++m_cur_bucket;
      m_cur_node = *m_cur_bucket;
    }

  template<typename Value, bool cache>
    inline bool
    operator==(const hashtable_iterator_base<Value, cache>& x,
               const hashtable_iterator_base<Value, cache>& y)
    { return x.m_cur_node == y.m_cur_node; }

  template<typename Value, bool cache>
    inline bool
    operator!=(const hashtable_iterator_base<Value, cache>& x,
               const hashtable_iterator_base<Value, cache>& y)
    { return x.m_cur_node != y.m_cur_node; }

  template<typename Value, bool constant_iterators, bool cache>
    struct hashtable_iterator
    : public hashtable_iterator_base<Value, cache>
    {
      typedef Value value_type;
      typedef typename IF<constant_iterators, const Value*, Value*>::type
        pointer;
      typedef typename IF<constant_iterators, const Value&, Value&>::type
        reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      hashtable_iterator()
      : hashtable_iterator_base<Value, cache>(0, 0) { }

      hashtable_iterator(hash_node<Value, cache>* p,
                         hash_node<Value, cache>** b)
      : hashtable_iterator_base<Value, cache>(p, b) { }

      explicit
      hashtable_iterator(hash_node<Value, cache>** b)
      : hashtable_iterator_base<Value, cache>(*b, b) { }

      reference
      operator*() const
      { return this->m_cur_node->m_v; }

      pointer
      operator->() const
      { return &this->m_cur_node->m_v; }

      hashtable_iterator&
      operator++()
      {
        this->incr();
        return *this;
      }

      hashtable_iterator
      operator++(int)
      {
        hashtable_iterator tmp(*this);
        this->incr();
        return tmp;
      }
    };

  template<typename Value, bool constant_iterators, bool cache>
    struct hashtable_const_iterator
    : public hashtable_iterator_base<Value, cache>
    {
      typedef Value value_type;
      typedef const Value* pointer;
      typedef const Value& reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      hashtable_const_iterator()
      : hashtable_iterator_base<Value, cache>(0, 0) { }

      hashtable_const_iterator(hash_node<Value, cache>* p,
                               hash_node<Value, cache>** b)
      : hashtable_iterator_base<Value, cache>(p, b) { }

      explicit
      hashtable_const_iterator(hash_node<Value, cache>** b)
      : hashtable_iterator_base<Value, cache>(*b, b) { }

      hashtable_const_iterator(const hashtable_iterator<Value,
                               constant_iterators, cache>& x)
      : hashtable_iterator_base<Value, cache>(x.m_cur_node, x.m_cur_bucket) { }

      reference
      operator*() const
      { return this->m_cur_node->m_v; }

      pointer
      operator->() const
      { return &this->m_cur_node->m_v; }

      hashtable_const_iterator&
      operator++()
      {
        this->incr();
        return *this;
      }

      hashtable_const_iterator
      operator++(int)
      {
        hashtable_const_iterator tmp(*this);
        this->incr();
        return tmp;
      }
    };

} // namespace Internal

// ----------------------------------------------------------------------
// Many of class template hashtable's template parameters are policy
// classes.  These are defaults for the policies.

namespace Internal
{
  // The two key extraction policies used by the *set and *map variants.
  template<typename T>
    struct identity
    {
      const T&
      operator()(const T& t) const
      { return t; }
    };

  template<typename Pair>
    struct extract1st
    {
      const typename Pair::first_type&
      operator()(const Pair& p) const
      { return p.first; }
    };

  // Default range hashing function: use division to fold a large number
  // into the range [0, N).
  struct mod_range_hashing
  {
    typedef std::size_t first_argument_type;
    typedef std::size_t second_argument_type;
    typedef std::size_t result_type;

    result_type
    operator()(first_argument_type r, second_argument_type N) const
    { return r % N; }
  };

  // Default ranged hash function H.  In principle it should be a
  // function object composed from objects of type H1 and H2 such that
  // h(k, N) = h2(h1(k), N), but that would mean making extra copies of
  // h1 and h2.  So instead we'll just use a tag to tell class template
  // hashtable to do that composition.
  struct default_ranged_hash { };
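
  // Illustrative sketch (not part of this header): with the default policies,
  // the bucket for a key k in a table of N buckets is obtained by composing
  // the hash function (H1) with the range-hashing function (H2):
  //
  //   std::size_t code   = h1(k);                        // hash code
  //   std::size_t bucket = mod_range_hashing()(code, N); // code % N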

  // Default value for rehash policy.  Bucket size is (usually) the
  // smallest prime that keeps the load factor small enough.
  struct prime_rehash_policy
  {
    prime_rehash_policy(float z = 1.0);

    float
    max_load_factor() const;

    // Return a bucket size no smaller than n.
    std::size_t
    next_bkt(std::size_t n) const;

    // Return a bucket count appropriate for n elements.
    std::size_t
    bkt_for_elements(std::size_t n) const;

    // n_bkt is current bucket count, n_elt is current element count,
    // and n_ins is number of elements to be inserted.  Do we need to
    // increase bucket count?  If so, return make_pair(true, n), where n
    // is the new bucket count.  If not, return make_pair(false, 0).
    std::pair<bool, std::size_t>
    need_rehash(std::size_t n_bkt, std::size_t n_elt, std::size_t n_ins) const;

    float               m_max_load_factor;
    float               m_growth_factor;
    mutable std::size_t m_next_resize;
  };

  // XXX This is a hack.  prime_rehash_policy's member functions, and
  // certainly the list of primes, should be defined in a .cc file.
  // We're temporarily putting them in a header because we don't have a
  // place to put TR1 .cc files yet.  There's no good reason for any of
  // prime_rehash_policy's member functions to be inline, and there's
  // certainly no good reason for X<> to exist at all.

  // Comparison functor for lower_bound: lets us compare the unsigned long
  // table entries against a float or size_t search value.
  struct lt
  {
    template<typename X, typename Y>
      bool
      operator()(X x, Y y)
      { return x < y; }
  };

  template<int ulongsize = sizeof(unsigned long)>
    struct X
    {
      static const int n_primes = ulongsize != 8 ? 256 : 256 + 48;
      static const unsigned long primes[256 + 48 + 1];
    };

  template<int ulongsize>
    const int X<ulongsize>::n_primes;

  template<int ulongsize>
    const unsigned long X<ulongsize>::primes[256 + 48 + 1] =
    {
      2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul,
      37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul,
      83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul,
      157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul,
      277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul,
      503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul,
      953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul,
      1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul,
      3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul,
      5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul,
      11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul,
      19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul,
      33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul,
      57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul,
      99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul,
      159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul,
      256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul,
      410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul,
      658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul,
      1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul,
      1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul,
      2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul,
      4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul,
      6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul,
      11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul,
      16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul,
      24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul,
      36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul,
      54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul,
      80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul,
      118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul,
      176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul,
      260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul,
      386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul,
      573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul,
      849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul,
      1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul,
      1725587117ul, 1866894511ul, 2019773507ul, 2185171673ul,
      2364114217ul, 2557710269ul, 2767159799ul, 2993761039ul,
      3238918481ul, 3504151727ul, 3791104843ul, 4101556399ul,

      // Sentinel, so we don't have to test the result of lower_bound,
      // or, on 64-bit machines, rest of the table.
      ulongsize != 8 ? 4294967291ul : (unsigned long)6442450933ull,
      (unsigned long)8589934583ull,
      (unsigned long)12884901857ull, (unsigned long)17179869143ull,
      (unsigned long)25769803693ull, (unsigned long)34359738337ull,
      (unsigned long)51539607367ull, (unsigned long)68719476731ull,
      (unsigned long)103079215087ull, (unsigned long)137438953447ull,
      (unsigned long)206158430123ull, (unsigned long)274877906899ull,
      (unsigned long)412316860387ull, (unsigned long)549755813881ull,
      (unsigned long)824633720731ull, (unsigned long)1099511627689ull,
      (unsigned long)1649267441579ull, (unsigned long)2199023255531ull,
      (unsigned long)3298534883309ull, (unsigned long)4398046511093ull,
      (unsigned long)6597069766607ull, (unsigned long)8796093022151ull,
      (unsigned long)13194139533241ull, (unsigned long)17592186044399ull,
      (unsigned long)26388279066581ull, (unsigned long)35184372088777ull,
      (unsigned long)52776558133177ull, (unsigned long)70368744177643ull,
      (unsigned long)105553116266399ull, (unsigned long)140737488355213ull,
      (unsigned long)211106232532861ull, (unsigned long)281474976710597ull,
      (unsigned long)562949953421231ull, (unsigned long)1125899906842597ull,
      (unsigned long)2251799813685119ull, (unsigned long)4503599627370449ull,
      (unsigned long)9007199254740881ull, (unsigned long)18014398509481951ull,
      (unsigned long)36028797018963913ull, (unsigned long)72057594037927931ull,
      (unsigned long)144115188075855859ull,
      (unsigned long)288230376151711717ull,
      (unsigned long)576460752303423433ull,
      (unsigned long)1152921504606846883ull,
      (unsigned long)2305843009213693951ull,
      (unsigned long)4611686018427387847ull,
      (unsigned long)9223372036854775783ull,
      (unsigned long)18446744073709551557ull,
      (unsigned long)18446744073709551557ull
    };

  inline
  prime_rehash_policy::
  prime_rehash_policy(float z)
  : m_max_load_factor(z), m_growth_factor(2.f), m_next_resize(0)
  { }

  inline float
  prime_rehash_policy::
  max_load_factor() const
  { return m_max_load_factor; }

  // Return a prime no smaller than n.
  inline std::size_t
  prime_rehash_policy::
  next_bkt(std::size_t n) const
  {
    const unsigned long* const last = X<>::primes + X<>::n_primes;
    const unsigned long* p = std::lower_bound(X<>::primes, last, n);
    m_next_resize = static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
    return *p;
  }

  // Return the smallest prime p such that alpha p >= n, where alpha
  // is the load factor.
  inline std::size_t
  prime_rehash_policy::
  bkt_for_elements(std::size_t n) const
  {
    const unsigned long* const last = X<>::primes + X<>::n_primes;
    const float min_bkts = n / m_max_load_factor;
    const unsigned long* p = std::lower_bound(X<>::primes, last,
                                              min_bkts, lt());
    m_next_resize = static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
    return *p;
  }

  // Finds the smallest prime p such that alpha p > n_elt + n_ins.
  // If p > n_bkt, return make_pair(true, p); otherwise return
  // make_pair(false, 0).  In principle this isn't very different from
  // bkt_for_elements.

  // The only tricky part is that we're caching the element count at
  // which we need to rehash, so we don't have to do a floating-point
  // multiply for every insertion.
  inline std::pair<bool, std::size_t>
  prime_rehash_policy::
  need_rehash(std::size_t n_bkt, std::size_t n_elt, std::size_t n_ins) const
  {
    if (n_elt + n_ins > m_next_resize)
      {
        float min_bkts = (float(n_ins) + float(n_elt)) / m_max_load_factor;
        if (min_bkts > n_bkt)
          {
            min_bkts = std::max(min_bkts, m_growth_factor * n_bkt);
            const unsigned long* const last = X<>::primes + X<>::n_primes;
            const unsigned long* p = std::lower_bound(X<>::primes, last,
                                                      min_bkts, lt());
            m_next_resize =
              static_cast<std::size_t>(std::ceil(*p * m_max_load_factor));
            return std::make_pair(true, *p);
          }
        else
          {
            m_next_resize =
              static_cast<std::size_t>(std::ceil(n_bkt * m_max_load_factor));
            return std::make_pair(false, 0);
          }
      }
    else
      return std::make_pair(false, 0);
  }
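
  // Illustrative sketch (not part of this header): with the default maximum
  // load factor of 1.0 and growth factor 2, a freshly constructed policy
  // asked about a full 11-bucket table reports that one more insertion
  // requires growing to the next prime at least twice the old count:
  //
  //   prime_rehash_policy p;
  //   p.need_rehash(11, 11, 1);   // make_pair(true, 23)
  //   p.need_rehash(23, 12, 1);   // make_pair(false, 0) -- 13 <= cached 23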

} // namespace Internal

//----------------------------------------------------------------------
// Base classes for std::tr1::hashtable.  We define these base classes
// because in some cases we want to do different things depending on
// the value of a policy class.  In some cases the policy class affects
// which member functions and nested typedefs are defined; we handle that
// by specializing base class templates.  Several of the base class templates
// need to access other members of class template hashtable, so we use
// the "curiously recurring template pattern" for them.

namespace Internal
{
  // class template map_base.  If the hashtable has a value type of the
  // form pair<T1, T2> and a key extraction policy that returns the
  // first part of the pair, the hashtable gets a mapped_type typedef.
  // If it satisfies those criteria and also has unique keys, then it
  // also gets an operator[].
  template<typename K, typename V, typename Ex, bool unique, typename Hashtable>
    struct map_base { };

  template<typename K, typename Pair, typename Hashtable>
    struct map_base<K, Pair, extract1st<Pair>, false, Hashtable>
    {
      typedef typename Pair::second_type mapped_type;
    };

  template<typename K, typename Pair, typename Hashtable>
    struct map_base<K, Pair, extract1st<Pair>, true, Hashtable>
    {
      typedef typename Pair::second_type mapped_type;

      mapped_type&
      operator[](const K& k);
    };

  template<typename K, typename Pair, typename Hashtable>
    typename map_base<K, Pair, extract1st<Pair>, true, Hashtable>::mapped_type&
    map_base<K, Pair, extract1st<Pair>, true, Hashtable>::
    operator[](const K& k)
    {
      Hashtable* h = static_cast<Hashtable*>(this);
      typename Hashtable::hash_code_t code = h->m_hash_code(k);
      std::size_t n = h->bucket_index(k, code, h->bucket_count());

      typename Hashtable::node* p = h->m_find_node(h->m_buckets[n], k, code);
      if (!p)
        return h->m_insert_bucket(std::make_pair(k, mapped_type()),
                                  n, code)->second;
      return (p->m_v).second;
    }
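
  // Illustrative sketch (not part of this header): through this base class a
  // unique-key, pair-valued hashtable (i.e. unordered_map) gains the usual
  // subscript behaviour:
  //
  //   std::tr1::unordered_map<std::string, int> counts;
  //   ++counts["apple"];   // inserts ("apple", 0) if absent, then increments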

  // class template rehash_base.  Give hashtable the max_load_factor
  // functions iff the rehash policy is prime_rehash_policy.
  template<typename RehashPolicy, typename Hashtable>
    struct rehash_base { };

  template<typename Hashtable>
    struct rehash_base<prime_rehash_policy, Hashtable>
    {
      float
      max_load_factor() const
      {
        const Hashtable* This = static_cast<const Hashtable*>(this);
        return This->rehash_policy().max_load_factor();
      }

      void
      max_load_factor(float z)
      {
        Hashtable* This = static_cast<Hashtable*>(this);
        This->rehash_policy(prime_rehash_policy(z));
      }
    };

  // Class template hash_code_base.  Encapsulates two policy issues that
  // aren't quite orthogonal.
  //   (1) the difference between using a ranged hash function and using
  //       the combination of a hash function and a range-hashing function.
  //       In the former case we don't have such things as hash codes, so
  //       we have a dummy type as placeholder.
  //   (2) Whether or not we cache hash codes.  Caching hash codes is
  //       meaningless if we have a ranged hash function.
  // We also put the key extraction and equality comparison function
  // objects here, for convenience.

  // Primary template: unused except as a hook for specializations.
  template<typename Key, typename Value,
           typename ExtractKey, typename Equal,
           typename H1, typename H2, typename H,
           bool cache_hash_code>
    struct hash_code_base;

  // Specialization: ranged hash function, no caching hash codes.  H1
  // and H2 are provided but ignored.  We define a dummy hash code type.
  template<typename Key, typename Value,
           typename ExtractKey, typename Equal,
           typename H1, typename H2, typename H>
    struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, false>
    {
    protected:
      hash_code_base(const ExtractKey& ex, const Equal& eq,
                     const H1&, const H2&, const H& h)
      : m_extract(ex), m_eq(eq), m_ranged_hash(h) { }

      typedef void* hash_code_t;

      hash_code_t
      m_hash_code(const Key& k) const
      { return 0; }

      std::size_t
      bucket_index(const Key& k, hash_code_t, std::size_t N) const
      { return m_ranged_hash(k, N); }

      std::size_t
      bucket_index(const hash_node<Value, false>* p, std::size_t N) const
      { return m_ranged_hash(m_extract(p->m_v), N); }

      bool
      compare(const Key& k, hash_code_t, hash_node<Value, false>* n) const
      { return m_eq(k, m_extract(n->m_v)); }

      void
      store_code(hash_node<Value, false>*, hash_code_t) const
      { }

      void
      copy_code(hash_node<Value, false>*, const hash_node<Value, false>*) const
      { }

      void
      m_swap(hash_code_base& x)
      {
        std::swap(m_extract, x.m_extract);
        std::swap(m_eq, x.m_eq);
        std::swap(m_ranged_hash, x.m_ranged_hash);
      }

    protected:
      ExtractKey m_extract;
      Equal m_eq;
      H m_ranged_hash;
    };

  // No specialization for ranged hash function while caching hash codes.
  // That combination is meaningless, and trying to do it is an error.

  // Specialization: ranged hash function, cache hash codes.  This
  // combination is meaningless, so we provide only a declaration
  // and no definition.
  template<typename Key, typename Value,
           typename ExtractKey, typename Equal,
           typename H1, typename H2, typename H>
    struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, true>;

  // Specialization: hash function and range-hashing function, no
  // caching of hash codes.  H is provided but ignored.  Provides
  // typedef and accessor required by TR1.
  template<typename Key, typename Value,
           typename ExtractKey, typename Equal,
           typename H1, typename H2>
    struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2,
                          default_ranged_hash, false>
    {
      typedef H1 hasher;

      hasher
      hash_function() const
      { return m_h1; }

    protected:
      hash_code_base(const ExtractKey& ex, const Equal& eq,
                     const H1& h1, const H2& h2, const default_ranged_hash&)
      : m_extract(ex), m_eq(eq), m_h1(h1), m_h2(h2) { }

      typedef std::size_t hash_code_t;

      hash_code_t
      m_hash_code(const Key& k) const
      { return m_h1(k); }

      std::size_t
      bucket_index(const Key&, hash_code_t c, std::size_t N) const
      { return m_h2(c, N); }

      std::size_t
      bucket_index(const hash_node<Value, false>* p, std::size_t N) const
      { return m_h2(m_h1(m_extract(p->m_v)), N); }

      bool
      compare(const Key& k, hash_code_t, hash_node<Value, false>* n) const
      { return m_eq(k, m_extract(n->m_v)); }

      void
      store_code(hash_node<Value, false>*, hash_code_t) const
      { }

      void
      copy_code(hash_node<Value, false>*, const hash_node<Value, false>*) const
      { }

      void
      m_swap(hash_code_base& x)
      {
        std::swap(m_extract, x.m_extract);
        std::swap(m_eq, x.m_eq);
        std::swap(m_h1, x.m_h1);
        std::swap(m_h2, x.m_h2);
      }

    protected:
      ExtractKey m_extract;
      Equal m_eq;
      H1 m_h1;
      H2 m_h2;
    };

  // Specialization: hash function and range-hashing function,
  // caching hash codes.  H is provided but ignored.  Provides
  // typedef and accessor required by TR1.
  template<typename Key, typename Value,
           typename ExtractKey, typename Equal,
           typename H1, typename H2>
    struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2,
                          default_ranged_hash, true>
    {
      typedef H1 hasher;

      hasher
      hash_function() const
      { return m_h1; }

    protected:
      hash_code_base(const ExtractKey& ex, const Equal& eq,
                     const H1& h1, const H2& h2, const default_ranged_hash&)
      : m_extract(ex), m_eq(eq), m_h1(h1), m_h2(h2) { }

      typedef std::size_t hash_code_t;

      hash_code_t
      m_hash_code(const Key& k) const
      { return m_h1(k); }

      std::size_t
      bucket_index(const Key&, hash_code_t c, std::size_t N) const
      { return m_h2(c, N); }

      std::size_t
      bucket_index(const hash_node<Value, true>* p, std::size_t N) const
      { return m_h2(p->hash_code, N); }

      bool
      compare(const Key& k, hash_code_t c, hash_node<Value, true>* n) const
      { return c == n->hash_code && m_eq(k, m_extract(n->m_v)); }

      void
      store_code(hash_node<Value, true>* n, hash_code_t c) const
      { n->hash_code = c; }

      void
      copy_code(hash_node<Value, true>* to,
                const hash_node<Value, true>* from) const
      { to->hash_code = from->hash_code; }

      void
      m_swap(hash_code_base& x)
      {
        std::swap(m_extract, x.m_extract);
        std::swap(m_eq, x.m_eq);
        std::swap(m_h1, x.m_h1);
        std::swap(m_h2, x.m_h2);
      }

    protected:
      ExtractKey m_extract;
      Equal m_eq;
      H1 m_h1;
      H2 m_h2;
    };

} // namespace Internal

//----------------------------------------------------------------------
// Class template hashtable, class definition.

// Meaning of class template hashtable's template parameters

// Key and Value: arbitrary CopyConstructible types.

// Allocator: an allocator type ([lib.allocator.requirements]) whose
// value type is Value.

// ExtractKey: function object that takes an object of type Value
// and returns a value of type Key.

// Equal: function object that takes two objects of type Key and returns
// a bool-like value that is true if the two objects are considered equal.

// H1: the hash function.  A unary function object with argument type
// Key and result type size_t.  Return values should be distributed
// over the entire range [0, numeric_limits<size_t>::max()].

// H2: the range-hashing function (in the terminology of Tavori and
// Dreizin).  A binary function object whose argument types and result
// type are all size_t.  Given arguments r and N, the return value is
// in the range [0, N).

// H: the ranged hash function (Tavori and Dreizin).  A binary function
// whose argument types are Key and size_t and whose result type is
// size_t.  Given arguments k and N, the return value is in the range
// [0, N).  Default: h(k, N) = h2(h1(k), N).  If H is anything other
// than the default, H1 and H2 are ignored.

// RehashPolicy: Policy class with three members, all of which govern
// the bucket count.  next_bkt(n) returns a bucket count no smaller
// than n.  bkt_for_elements(n) returns a bucket count appropriate
// for an element count of n.  need_rehash(n_bkt, n_elt, n_ins)
// determines whether, if the current bucket count is n_bkt and the
// current element count is n_elt, we need to increase the bucket
// count.  If so, returns make_pair(true, n), where n is the new
// bucket count.  If not, returns make_pair(false, <anything>).

// ??? Right now it is hard-wired that the number of buckets never
// shrinks.  Should we allow RehashPolicy to change that?

// cache_hash_code: bool.  true if we store the value of the hash
// function along with the value.  This is a time-space tradeoff.
// Storing it may improve lookup speed by reducing the number of times
// we need to call the Equal function.

// constant_iterators: bool.  true if iterator and const_iterator are
// both constant iterator types.  This is true for unordered_set and
// unordered_multiset, false for unordered_map and unordered_multimap.

// unique_keys: bool.  true if the return value of hashtable::count(k)
// is always at most one, false if it may be an arbitrary number.  This
// is true for unordered_set and unordered_map, false for unordered_multiset
// and unordered_multimap.
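
// Illustrative sketch (not part of this header): the *map containers are
// expected to instantiate hashtable roughly as below, using extract1st as
// the key-extraction policy, mutable iterators, and unique keys; the *set
// containers would use identity, constant iterators, and unique keys.  The
// exact defaults live in <tr1/unordered_map> and <tr1/unordered_set>.
//
//   typedef std::tr1::hashtable<Key, std::pair<const Key, T>,
//                               std::allocator<std::pair<const Key, T> >,
//                               Internal::extract1st<std::pair<const Key, T> >,
//                               std::equal_to<Key>, std::tr1::hash<Key>,
//                               Internal::mod_range_hashing,
//                               Internal::default_ranged_hash,
//                               Internal::prime_rehash_policy,
//                               false,    // cache_hash_code
//                               false,    // constant_iterators
//                               true>     // unique_keys
//     unordered_map_base;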

  template<typename Key, typename Value,
           typename Allocator,
           typename ExtractKey, typename Equal,
           typename H1, typename H2,
           typename H, typename RehashPolicy,
           bool cache_hash_code,
           bool constant_iterators,
           bool unique_keys>
    class hashtable
    : public Internal::rehash_base<RehashPolicy,
                                   hashtable<Key, Value, Allocator, ExtractKey,
                                             Equal, H1, H2, H, RehashPolicy,
                                             cache_hash_code, constant_iterators,
                                             unique_keys> >,
      public Internal::hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H,
                                      cache_hash_code>,
      public Internal::map_base<Key, Value, ExtractKey, unique_keys,
                                hashtable<Key, Value, Allocator, ExtractKey,
                                          Equal, H1, H2, H, RehashPolicy,
                                          cache_hash_code, constant_iterators,
                                          unique_keys> >
    {
    public:
      typedef Allocator                               allocator_type;
      typedef Value                                   value_type;
      typedef Key                                     key_type;
      typedef Equal                                   key_equal;
      // mapped_type, if present, comes from map_base.
      // hasher, if present, comes from hash_code_base.
      typedef typename Allocator::difference_type     difference_type;
      typedef typename Allocator::size_type           size_type;
      typedef typename Allocator::reference           reference;
      typedef typename Allocator::const_reference     const_reference;

      typedef Internal::node_iterator<value_type, constant_iterators,
                                      cache_hash_code>
        local_iterator;
      typedef Internal::node_const_iterator<value_type, constant_iterators,
                                            cache_hash_code>
        const_local_iterator;

      typedef Internal::hashtable_iterator<value_type, constant_iterators,
                                           cache_hash_code>
        iterator;
      typedef Internal::hashtable_const_iterator<value_type, constant_iterators,
                                                 cache_hash_code>
        const_iterator;

      template<typename K, typename Pair, typename Hashtable>
        friend struct Internal::map_base;

    private:
      typedef Internal::hash_node<Value, cache_hash_code> node;
      typedef typename Allocator::template rebind<node>::other
        node_allocator_t;
      typedef typename Allocator::template rebind<node*>::other
        bucket_allocator_t;

    private:
      node_allocator_t m_node_allocator;
      node** m_buckets;
      size_type m_bucket_count;
      size_type m_element_count;
      RehashPolicy m_rehash_policy;

      node*
      m_allocate_node(const value_type& v);

      void
      m_deallocate_node(node* n);

      void
      m_deallocate_nodes(node**, size_type);

      node**
      m_allocate_buckets(size_type n);

      void
      m_deallocate_buckets(node**, size_type n);

    public:			// Constructor, destructor, assignment, swap
      hashtable(size_type bucket_hint,
                const H1&, const H2&, const H&,
                const Equal&, const ExtractKey&,
                const allocator_type&);

      template<typename InIter>
        hashtable(InIter first, InIter last,
                  size_type bucket_hint,
                  const H1&, const H2&, const H&,
                  const Equal&, const ExtractKey&,
                  const allocator_type&);

      hashtable(const hashtable&);

      hashtable&
      operator=(const hashtable&);

      ~hashtable();

      void swap(hashtable&);

    public:			// Basic container operations
      iterator
      begin()
      {
        iterator i(m_buckets);
        if (!i.m_cur_node)
          i.m_incr_bucket();
        return i;
      }

      const_iterator
      begin() const
      {
        const_iterator i(m_buckets);
        if (!i.m_cur_node)
          i.m_incr_bucket();
        return i;
      }

      iterator
      end()
      { return iterator(m_buckets + m_bucket_count); }

      const_iterator
      end() const
      { return const_iterator(m_buckets + m_bucket_count); }

      size_type
      size() const
      { return m_element_count; }

      bool
      empty() const
      { return size() == 0; }

      allocator_type
      get_allocator() const
      { return m_node_allocator; }

      size_type
      max_size() const
      { return m_node_allocator.max_size(); }

    public:			// Observers
      key_equal
      key_eq() const
      { return this->m_eq; }

      // hash_function, if present, comes from hash_code_base.

    public:			// Bucket operations
      size_type
      bucket_count() const
      { return m_bucket_count; }

      size_type
      max_bucket_count() const
      { return max_size(); }

      size_type
      bucket_size(size_type n) const
      { return std::distance(begin(n), end(n)); }

      size_type
      bucket(const key_type& k) const
      {
        return this->bucket_index(k, this->m_hash_code(k),
                                  this->m_bucket_count);
      }

      local_iterator
      begin(size_type n)
      { return local_iterator(m_buckets[n]); }

      local_iterator
      end(size_type)
      { return local_iterator(0); }

      const_local_iterator
      begin(size_type n) const
      { return const_local_iterator(m_buckets[n]); }

      const_local_iterator
      end(size_type) const
      { return const_local_iterator(0); }

      float
      load_factor() const
      {
        return static_cast<float>(size()) / static_cast<float>(bucket_count());
      }
      // max_load_factor, if present, comes from rehash_base.

      // Generalization of max_load_factor.  Extension, not found in TR1.  Only
      // useful if RehashPolicy is something other than the default.
      const RehashPolicy&
      rehash_policy() const
      { return m_rehash_policy; }

      void
      rehash_policy(const RehashPolicy&);

    public:			// Lookup
      iterator
      find(const key_type& k);

      const_iterator
      find(const key_type& k) const;

      size_type
      count(const key_type& k) const;

      std::pair<iterator, iterator>
      equal_range(const key_type& k);

      std::pair<const_iterator, const_iterator>
      equal_range(const key_type& k) const;

    private:			// Find, insert and erase helper functions
      // ??? This dispatching is a workaround for the fact that we don't
      // have partial specialization of member templates; it would be
      // better to just specialize insert on unique_keys.  There may be a
      // cleaner workaround.
      typedef typename Internal::IF<unique_keys,
                                    std::pair<iterator, bool>, iterator>::type
        Insert_Return_Type;

      typedef typename Internal::IF<unique_keys,
                                    Internal::extract1st<Insert_Return_Type>,
                                    Internal::identity<Insert_Return_Type>
                                   >::type
        Insert_Conv_Type;

      node*
      m_find_node(node*, const key_type&,
                  typename hashtable::hash_code_t) const;

      iterator
      m_insert_bucket(const value_type&, size_type,
                      typename hashtable::hash_code_t);

      std::pair<iterator, bool>
      m_insert(const value_type&, std::tr1::true_type);

      iterator
      m_insert(const value_type&, std::tr1::false_type);

      void
      m_erase_node(node*, node**);

    public:			// Insert and erase
      Insert_Return_Type
      insert(const value_type& v)
      { return m_insert(v, std::tr1::integral_constant<bool, unique_keys>()); }

      iterator
      insert(iterator, const value_type& v)
      { return iterator(Insert_Conv_Type()(this->insert(v))); }

      const_iterator
      insert(const_iterator, const value_type& v)
      { return const_iterator(Insert_Conv_Type()(this->insert(v))); }

      template<typename InIter>
        void
        insert(InIter first, InIter last);

      iterator
      erase(iterator);

      const_iterator
      erase(const_iterator);

      size_type
      erase(const key_type&);

      iterator
      erase(iterator, iterator);

      const_iterator
      erase(const_iterator, const_iterator);

      void
      clear();

    public:
      // Set number of buckets to be appropriate for a container of n elements.
      void rehash(size_type n);

    private:
      // Unconditionally change size of bucket array to n.
      void m_rehash(size_type n);
    };

//----------------------------------------------------------------------
// Definitions of class template hashtable's out-of-line member functions.

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::node*
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_allocate_node(const value_type& v)
    {
      node* n = m_node_allocator.allocate(1);
      try
        {
          get_allocator().construct(&n->m_v, v);
          n->m_next = 0;
          return n;
        }
      catch(...)
        {
          m_node_allocator.deallocate(n, 1);
          __throw_exception_again;
        }
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_deallocate_node(node* n)
    {
      get_allocator().destroy(&n->m_v);
      m_node_allocator.deallocate(n, 1);
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_deallocate_nodes(node** array, size_type n)
    {
      for (size_type i = 0; i < n; ++i)
        {
          node* p = array[i];
          while (p)
            {
              node* tmp = p;
              p = p->m_next;
              m_deallocate_node(tmp);
            }
          array[i] = 0;
        }
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::node**
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_allocate_buckets(size_type n)
    {
      bucket_allocator_t alloc(m_node_allocator);

      // We allocate one extra bucket to hold a sentinel, an arbitrary
      // non-null pointer.  Iterator increment relies on this.
      node** p = alloc.allocate(n + 1);
      std::fill(p, p + n, (node*) 0);
      p[n] = reinterpret_cast<node*>(0x1000);
      return p;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_deallocate_buckets(node** p, size_type n)
    {
      bucket_allocator_t alloc(m_node_allocator);
      alloc.deallocate(p, n + 1);
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    hashtable(size_type bucket_hint,
              const H1& h1, const H2& h2, const H& h,
              const Eq& eq, const Ex& exk,
              const allocator_type& a)
    : Internal::rehash_base<RP, hashtable>(),
      Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>(exk, eq, h1, h2, h),
      Internal::map_base<K, V, Ex, u, hashtable>(),
      m_node_allocator(a),
      m_bucket_count(0),
      m_element_count(0),
      m_rehash_policy()
    {
      m_bucket_count = m_rehash_policy.next_bkt(bucket_hint);
      m_buckets = m_allocate_buckets(m_bucket_count);
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    template<typename InIter>
      hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
      hashtable(InIter f, InIter l,
                size_type bucket_hint,
                const H1& h1, const H2& h2, const H& h,
                const Eq& eq, const Ex& exk,
                const allocator_type& a)
      : Internal::rehash_base<RP, hashtable>(),
        Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>(exk, eq,
                                                             h1, h2, h),
        Internal::map_base<K, V, Ex, u, hashtable>(),
        m_node_allocator(a),
        m_bucket_count(0),
        m_element_count(0),
        m_rehash_policy()
      {
        m_bucket_count = std::max(m_rehash_policy.next_bkt(bucket_hint),
                                  m_rehash_policy.
                                  bkt_for_elements(Internal::
                                                   distance_fw(f, l)));
        m_buckets = m_allocate_buckets(m_bucket_count);
        try
          {
            for (; f != l; ++f)
              this->insert(*f);
          }
        catch(...)
          {
            clear();
            m_deallocate_buckets(m_buckets, m_bucket_count);
            __throw_exception_again;
          }
      }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    hashtable(const hashtable& ht)
    : Internal::rehash_base<RP, hashtable>(ht),
      Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>(ht),
      Internal::map_base<K, V, Ex, u, hashtable>(ht),
      m_node_allocator(ht.get_allocator()),
      m_bucket_count(ht.m_bucket_count),
      m_element_count(ht.m_element_count),
      m_rehash_policy(ht.m_rehash_policy)
    {
      m_buckets = m_allocate_buckets(m_bucket_count);
      try
        {
          for (size_type i = 0; i < ht.m_bucket_count; ++i)
            {
              node* n = ht.m_buckets[i];
              node** tail = m_buckets + i;
              while (n)
                {
                  *tail = m_allocate_node(n->m_v);
                  this->copy_code(*tail, n);
                  tail = &((*tail)->m_next);
                  n = n->m_next;
                }
            }
        }
      catch(...)
        {
          clear();
          m_deallocate_buckets(m_buckets, m_bucket_count);
          __throw_exception_again;
        }
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>&
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    operator=(const hashtable& ht)
    {
      // Copy-and-swap provides the strong exception-safety guarantee.
      hashtable tmp(ht);
      this->swap(tmp);
      return *this;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    ~hashtable()
    {
      clear();
      m_deallocate_buckets(m_buckets, m_bucket_count);
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    swap(hashtable& x)
    {
      // The only base class with member variables is hash_code_base.  We
      // define hash_code_base::m_swap because different specializations
      // have different members.
      Internal::hash_code_base<K, V, Ex, Eq, H1, H2, H, c>::m_swap(x);

      // open LWG issue 431
      // std::swap(m_node_allocator, x.m_node_allocator);
      std::swap(m_rehash_policy, x.m_rehash_policy);
      std::swap(m_buckets, x.m_buckets);
      std::swap(m_bucket_count, x.m_bucket_count);
      std::swap(m_element_count, x.m_element_count);
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    rehash_policy(const RP& pol)
    {
      m_rehash_policy = pol;
      size_type n_bkt = pol.bkt_for_elements(m_element_count);
      if (n_bkt > m_bucket_count)
        m_rehash(n_bkt);
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    find(const key_type& k)
    {
      typename hashtable::hash_code_t code = this->m_hash_code(k);
      std::size_t n = this->bucket_index(k, code, this->bucket_count());
      node* p = m_find_node(m_buckets[n], k, code);
      return p ? iterator(p, m_buckets + n) : this->end();
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::const_iterator
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    find(const key_type& k) const
    {
      typename hashtable::hash_code_t code = this->m_hash_code(k);
      std::size_t n = this->bucket_index(k, code, this->bucket_count());
      node* p = m_find_node(m_buckets[n], k, code);
      return p ? const_iterator(p, m_buckets + n) : this->end();
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::size_type
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    count(const key_type& k) const
    {
      typename hashtable::hash_code_t code = this->m_hash_code(k);
      std::size_t n = this->bucket_index(k, code, this->bucket_count());
      std::size_t result = 0;
      for (node* p = m_buckets[n]; p; p = p->m_next)
        if (this->compare(k, code, p))
          ++result;
      return result;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    std::pair<typename hashtable<K, V, A, Ex, Eq, H1,
                                 H2, H, RP, c, ci, u>::iterator,
              typename hashtable<K, V, A, Ex, Eq, H1,
                                 H2, H, RP, c, ci, u>::iterator>
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    equal_range(const key_type& k)
    {
      typename hashtable::hash_code_t code = this->m_hash_code(k);
      std::size_t n = this->bucket_index(k, code, this->bucket_count());
      node** head = m_buckets + n;
      node* p = m_find_node(*head, k, code);

      if (p)
        {
          node* p1 = p->m_next;
          for (; p1; p1 = p1->m_next)
            if (!this->compare(k, code, p1))
              break;

          iterator first(p, head);
          iterator last(p1, head);
          if (!p1)
            last.m_incr_bucket();
          return std::make_pair(first, last);
        }
      else
        return std::make_pair(this->end(), this->end());
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    std::pair<typename hashtable<K, V, A, Ex, Eq, H1,
                                 H2, H, RP, c, ci, u>::const_iterator,
              typename hashtable<K, V, A, Ex, Eq, H1,
                                 H2, H, RP, c, ci, u>::const_iterator>
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    equal_range(const key_type& k) const
    {
      typename hashtable::hash_code_t code = this->m_hash_code(k);
      std::size_t n = this->bucket_index(k, code, this->bucket_count());
      node** head = m_buckets + n;
      node* p = m_find_node(*head, k, code);

      if (p)
        {
          node* p1 = p->m_next;
          for (; p1; p1 = p1->m_next)
            if (!this->compare(k, code, p1))
              break;

          const_iterator first(p, head);
          const_iterator last(p1, head);
          if (!p1)
            last.m_incr_bucket();
          return std::make_pair(first, last);
        }
      else
        return std::make_pair(this->end(), this->end());
    }

  // Find the node whose key compares equal to k, beginning the search
  // at p (usually the head of a bucket).  Return nil if no node is found.
  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::node*
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_find_node(node* p, const key_type& k,
                typename hashtable::hash_code_t code) const
    {
      for (; p; p = p->m_next)
        if (this->compare(k, code, p))
          return p;
      return 0;
    }

  // Insert v in bucket n (assumes no element with its key already present).
  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_insert_bucket(const value_type& v, size_type n,
                    typename hashtable::hash_code_t code)
    {
      std::pair<bool, std::size_t> do_rehash
        = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, 1);

      // Allocate the new node before doing the rehash so that we don't
      // do a rehash if the allocation throws.
      node* new_node = m_allocate_node(v);

      try
        {
          if (do_rehash.first)
            {
              const key_type& k = this->m_extract(v);
              n = this->bucket_index(k, code, do_rehash.second);
              m_rehash(do_rehash.second);
            }

          new_node->m_next = m_buckets[n];
          this->store_code(new_node, code);
          m_buckets[n] = new_node;
          ++m_element_count;
          return iterator(new_node, m_buckets + n);
        }
      catch(...)
        {
          m_deallocate_node(new_node);
          __throw_exception_again;
        }
    }

  // Insert v if no element with its key is already present.
  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    std::pair<typename hashtable<K, V, A, Ex, Eq, H1,
                                 H2, H, RP, c, ci, u>::iterator, bool>
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_insert(const value_type& v, std::tr1::true_type)
    {
      const key_type& k = this->m_extract(v);
      typename hashtable::hash_code_t code = this->m_hash_code(k);
      size_type n = this->bucket_index(k, code, m_bucket_count);

      if (node* p = m_find_node(m_buckets[n], k, code))
        return std::make_pair(iterator(p, m_buckets + n), false);
      return std::make_pair(m_insert_bucket(v, n, code), true);
    }

  // Insert v unconditionally.
  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_insert(const value_type& v, std::tr1::false_type)
    {
      std::pair<bool, std::size_t> do_rehash
        = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, 1);
      if (do_rehash.first)
        m_rehash(do_rehash.second);

      const key_type& k = this->m_extract(v);
      typename hashtable::hash_code_t code = this->m_hash_code(k);
      size_type n = this->bucket_index(k, code, m_bucket_count);

      // First find the node, avoid leaking new_node if compare throws.
      node* prev = m_find_node(m_buckets[n], k, code);
      node* new_node = m_allocate_node(v);

      if (prev)
        {
          new_node->m_next = prev->m_next;
          prev->m_next = new_node;
        }
      else
        {
          new_node->m_next = m_buckets[n];
          m_buckets[n] = new_node;
        }
      this->store_code(new_node, code);

      ++m_element_count;
      return iterator(new_node, m_buckets + n);
    }

  // For erase(iterator) and erase(const_iterator).
  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_erase_node(node* p, node** b)
    {
      // Unlink p from its bucket, whose head is *b, then destroy it.
      node* cur = *b;
      if (cur == p)
        *b = cur->m_next;
      else
        {
          node* next = cur->m_next;
          while (next != p)
            {
              cur = next;
              next = cur->m_next;
            }
          cur->m_next = next->m_next;
        }

      m_deallocate_node(p);
      --m_element_count;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    template<typename InIter>
      void
      hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
      insert(InIter first, InIter last)
      {
        size_type n_elt = Internal::distance_fw(first, last);
        std::pair<bool, std::size_t> do_rehash
          = m_rehash_policy.need_rehash(m_bucket_count, m_element_count, n_elt);
        if (do_rehash.first)
          m_rehash(do_rehash.second);

        for (; first != last; ++first)
          this->insert(*first);
      }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    erase(iterator it)
    {
      iterator result = it;
      ++result;
      m_erase_node(it.m_cur_node, it.m_cur_bucket);
      return result;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::const_iterator
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    erase(const_iterator it)
    {
      const_iterator result = it;
      ++result;
      m_erase_node(it.m_cur_node, it.m_cur_bucket);
      return result;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::size_type
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    erase(const key_type& k)
    {
      typename hashtable::hash_code_t code = this->m_hash_code(k);
      size_type n = this->bucket_index(k, code, m_bucket_count);
      size_type result = 0;

      node** slot = m_buckets + n;
      while (*slot && !this->compare(k, code, *slot))
        slot = &((*slot)->m_next);

      while (*slot && this->compare(k, code, *slot))
        {
          node* p = *slot;
          *slot = p->m_next;
          m_deallocate_node(p);
          --m_element_count;
          ++result;
        }

      return result;
    }

  // ??? This could be optimized by taking advantage of the bucket
  // structure, but it's not clear that it's worth doing.  It probably
  // wouldn't even be an optimization unless the load factor is large.
  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::iterator
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    erase(iterator first, iterator last)
    {
      while (first != last)
        first = this->erase(first);
      return last;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    typename hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::const_iterator
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    erase(const_iterator first, const_iterator last)
    {
      while (first != last)
        first = this->erase(first);
      return last;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    clear()
    {
      m_deallocate_nodes(m_buckets, m_bucket_count);
      m_element_count = 0;
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    rehash(size_type n)
    {
      m_rehash(std::max(m_rehash_policy.next_bkt(n),
                        m_rehash_policy.bkt_for_elements(m_element_count
                                                         + 1)));
    }

  template<typename K, typename V,
           typename A, typename Ex, typename Eq,
           typename H1, typename H2, typename H, typename RP,
           bool c, bool ci, bool u>
    void
    hashtable<K, V, A, Ex, Eq, H1, H2, H, RP, c, ci, u>::
    m_rehash(size_type n)
    {
      node** new_array = m_allocate_buckets(n);
      try
        {
          for (size_type i = 0; i < m_bucket_count; ++i)
            while (node* p = m_buckets[i])
              {
                size_type new_index = this->bucket_index(p, n);
                m_buckets[i] = p->m_next;
                p->m_next = new_array[new_index];
                new_array[new_index] = p;
              }
          m_deallocate_buckets(m_buckets, m_bucket_count);
          m_bucket_count = n;
          m_buckets = new_array;
        }
      catch(...)
        {
          // A failure here means that a hash function threw an exception.
          // We can't restore the previous state without calling the hash
          // function again, so the only sensible recovery is to delete
          // everything.
          m_deallocate_nodes(new_array, n);
          m_deallocate_buckets(new_array, n);
          m_deallocate_nodes(m_buckets, m_bucket_count);
          m_element_count = 0;
          __throw_exception_again;
        }
    }

} // Namespace std::tr1

#endif /* GNU_LIBSTDCXX_TR1_HASHTABLE_ */