// Internal policy header for TR1 unordered_set and unordered_map -*- C++ -*-

// Copyright (C) 2007 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file tr1_impl/hashtable_policy.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

namespace std
{
_GLIBCXX_BEGIN_NAMESPACE_TR1

namespace __detail
{
  // Helper function: return distance(first, last) for forward
  // iterators, or 0 for input iterators.
  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last,
                  std::input_iterator_tag)
    { return 0; }

  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last,
                  std::forward_iterator_tag)
    { return std::distance(__first, __last); }

  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last)
    {
      typedef typename std::iterator_traits<_Iterator>::iterator_category _Tag;
      return __distance_fw(__first, __last, _Tag());
    }

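  // Illustrative sketch of the dispatch above (not part of the original
  // header; the array and stream iterators are arbitrary examples).
  // Pointer iterators are random-access, and their category tag derives
  // from forward_iterator_tag, so the counting overload is selected; a
  // pure input range cannot be walked twice just to count, so it yields 0:
  //
  //   int __a[3] = { 1, 2, 3 };
  //   __distance_fw(__a, __a + 3);               // == 3, via std::distance
  //
  //   std::istream_iterator<int> __in(std::cin), __eof;
  //   __distance_fw(__in, __eof);                // == 0, input iterators
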
  // Auxiliary types used for all instantiations of _Hashtable: nodes
  // and iterators.

  // Nodes, used to wrap elements stored in the hash table. A policy
  // template parameter of class template _Hashtable controls whether
  // nodes also store a hash code. In some cases (e.g. strings) this
  // may be a performance win.
  template<typename _Value, bool __cache_hash_code>
    struct _Hash_node;

  template<typename _Value>
    struct _Hash_node<_Value, true>
    {
      _Value       _M_v;
      std::size_t  _M_hash_code;
      _Hash_node*  _M_next;
    };

  template<typename _Value>
    struct _Hash_node<_Value, false>
    {
      _Value       _M_v;
      _Hash_node*  _M_next;
    };

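  // Illustrative note (an example, not a statement from the original
  // header): with _Value = std::pair<const std::string, int> and
  // __cache_hash_code == true, each node carries the std::size_t hash of
  // its string key, so rehashing and bucket lookups can work from the
  // cached code instead of hashing the string again; with
  // __cache_hash_code == false each node is one word smaller.
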
  // Local iterators, used to iterate within a bucket but not between
  // buckets.
  template<typename _Value, bool __cache>
    struct _Node_iterator_base
    {
      _Node_iterator_base(_Hash_node<_Value, __cache>* __p)
      : _M_cur(__p) { }

      void
      _M_incr()
      { _M_cur = _M_cur->_M_next; }

      _Hash_node<_Value, __cache>*  _M_cur;
    };

  template<typename _Value, bool __cache>
    inline bool
    operator==(const _Node_iterator_base<_Value, __cache>& __x,
               const _Node_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur == __y._M_cur; }

  template<typename _Value, bool __cache>
    inline bool
    operator!=(const _Node_iterator_base<_Value, __cache>& __x,
               const _Node_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur != __y._M_cur; }

  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Node_iterator
    : public _Node_iterator_base<_Value, __cache>
    {
      typedef _Value                                value_type;
      typedef typename
        __gnu_cxx::__conditional_type<__constant_iterators,
                                      const _Value*, _Value*>::__type
                                                    pointer;
      typedef typename
        __gnu_cxx::__conditional_type<__constant_iterators,
                                      const _Value&, _Value&>::__type
                                                    reference;
      typedef std::ptrdiff_t                        difference_type;
      typedef std::forward_iterator_tag             iterator_category;

      _Node_iterator()
      : _Node_iterator_base<_Value, __cache>(0) { }

      explicit
      _Node_iterator(_Hash_node<_Value, __cache>* __p)
      : _Node_iterator_base<_Value, __cache>(__p) { }

      reference
      operator*() const
      { return this->_M_cur->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur->_M_v; }

      _Node_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Node_iterator
      operator++(int)
      {
        _Node_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };

  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Node_const_iterator
    : public _Node_iterator_base<_Value, __cache>
    {
      typedef _Value                                value_type;
      typedef const _Value*                         pointer;
      typedef const _Value&                         reference;
      typedef std::ptrdiff_t                        difference_type;
      typedef std::forward_iterator_tag             iterator_category;

      _Node_const_iterator()
      : _Node_iterator_base<_Value, __cache>(0) { }

      explicit
      _Node_const_iterator(_Hash_node<_Value, __cache>* __p)
      : _Node_iterator_base<_Value, __cache>(__p) { }

      _Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
                           __cache>& __x)
      : _Node_iterator_base<_Value, __cache>(__x._M_cur) { }

      reference
      operator*() const
      { return this->_M_cur->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur->_M_v; }

      _Node_const_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Node_const_iterator
      operator++(int)
      {
        _Node_const_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };

  template<typename _Value, bool __cache>
    struct _Hashtable_iterator_base
    {
      _Hashtable_iterator_base(_Hash_node<_Value, __cache>* __node,
                               _Hash_node<_Value, __cache>** __bucket)
      : _M_cur_node(__node), _M_cur_bucket(__bucket) { }

      void
      _M_incr()
      {
        _M_cur_node = _M_cur_node->_M_next;
        if (!_M_cur_node)
          _M_incr_bucket();
      }

      void
      _M_incr_bucket();

      _Hash_node<_Value, __cache>*   _M_cur_node;
      _Hash_node<_Value, __cache>**  _M_cur_bucket;
    };

  // Global iterators, used for arbitrary iteration within a hash
  // table. Larger and more expensive than local iterators.
  template<typename _Value, bool __cache>
    void
    _Hashtable_iterator_base<_Value, __cache>::
    _M_incr_bucket()
    {
      ++_M_cur_bucket;

      // This loop requires the bucket array to have a non-null sentinel.
      while (!*_M_cur_bucket)
        ++_M_cur_bucket;
      _M_cur_node = *_M_cur_bucket;
    }

  template<typename _Value, bool __cache>
    inline bool
    operator==(const _Hashtable_iterator_base<_Value, __cache>& __x,
               const _Hashtable_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur_node == __y._M_cur_node; }

  template<typename _Value, bool __cache>
    inline bool
    operator!=(const _Hashtable_iterator_base<_Value, __cache>& __x,
               const _Hashtable_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur_node != __y._M_cur_node; }

  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Hashtable_iterator
    : public _Hashtable_iterator_base<_Value, __cache>
    {
      typedef _Value                                value_type;
      typedef typename
        __gnu_cxx::__conditional_type<__constant_iterators,
                                      const _Value*, _Value*>::__type
                                                    pointer;
      typedef typename
        __gnu_cxx::__conditional_type<__constant_iterators,
                                      const _Value&, _Value&>::__type
                                                    reference;
      typedef std::ptrdiff_t                        difference_type;
      typedef std::forward_iterator_tag             iterator_category;

      _Hashtable_iterator()
      : _Hashtable_iterator_base<_Value, __cache>(0, 0) { }

      _Hashtable_iterator(_Hash_node<_Value, __cache>* __p,
                          _Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { }

      explicit
      _Hashtable_iterator(_Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { }

      reference
      operator*() const
      { return this->_M_cur_node->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur_node->_M_v; }

      _Hashtable_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Hashtable_iterator
      operator++(int)
      {
        _Hashtable_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };

  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Hashtable_const_iterator
    : public _Hashtable_iterator_base<_Value, __cache>
    {
      typedef _Value                                value_type;
      typedef const _Value*                         pointer;
      typedef const _Value&                         reference;
      typedef std::ptrdiff_t                        difference_type;
      typedef std::forward_iterator_tag             iterator_category;

      _Hashtable_const_iterator()
      : _Hashtable_iterator_base<_Value, __cache>(0, 0) { }

      _Hashtable_const_iterator(_Hash_node<_Value, __cache>* __p,
                                _Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { }

      explicit
      _Hashtable_const_iterator(_Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { }

      _Hashtable_const_iterator(const _Hashtable_iterator<_Value,
                                __constant_iterators, __cache>& __x)
      : _Hashtable_iterator_base<_Value, __cache>(__x._M_cur_node,
                                                  __x._M_cur_bucket) { }

      reference
      operator*() const
      { return this->_M_cur_node->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur_node->_M_v; }

      _Hashtable_const_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Hashtable_const_iterator
      operator++(int)
      {
        _Hashtable_const_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };


  // Many of class template _Hashtable's template parameters are policy
  // classes. These are defaults for the policies.

  // Default range hashing function: use division to fold a large number
  // into the range [0, N).
  struct _Mod_range_hashing
  {
    typedef std::size_t first_argument_type;
    typedef std::size_t second_argument_type;
    typedef std::size_t result_type;

    result_type
    operator()(first_argument_type __num, second_argument_type __den) const
    { return __num % __den; }
  };

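  // Illustrative sketch (the 97-bucket table and the code value are made
  // up, not taken from this header): given a raw hash code produced by
  // some hash function H1, _Mod_range_hashing folds it into a bucket index
  // by plain division:
  //
  //   std::size_t __code = 1000;                          // from H1
  //   std::size_t __bkt = _Mod_range_hashing()(__code, 97);
  //   // __bkt == 1000 % 97 == 30, always in [0, 97)
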
  // Default ranged hash function H. In principle it should be a
  // function object composed from objects of type H1 and H2 such that
  // h(k, N) = h2(h1(k), N), but that would mean making extra copies of
  // h1 and h2. So instead we'll just use a tag to tell class template
  // hashtable to do that composition.
  struct _Default_ranged_hash { };

  // Default value for rehash policy. Bucket size is (usually) the
  // smallest prime that keeps the load factor small enough.
  struct _Prime_rehash_policy
  {
    _Prime_rehash_policy(float __z = 1.0)
    : _M_max_load_factor(__z), _M_growth_factor(2.f), _M_next_resize(0) { }

    float
    max_load_factor() const
    { return _M_max_load_factor; }

    // Return a bucket size no smaller than n.
    std::size_t
    _M_next_bkt(std::size_t __n) const;

    // Return a bucket count appropriate for n elements
    std::size_t
    _M_bkt_for_elements(std::size_t __n) const;

    // __n_bkt is current bucket count, __n_elt is current element count,
    // and __n_ins is number of elements to be inserted. Do we need to
    // increase bucket count? If so, return make_pair(true, n), where n
    // is the new bucket count. If not, return make_pair(false, 0).
    std::pair<bool, std::size_t>
    _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
                   std::size_t __n_ins) const;

    enum { _S_n_primes = sizeof(unsigned long) != 8 ? 256 : 256 + 48 };

    float               _M_max_load_factor;
    float               _M_growth_factor;
    mutable std::size_t _M_next_resize;
  };

  extern const unsigned long __prime_list[];

  // XXX This is a hack. There's no good reason for any of
  // _Prime_rehash_policy's member functions to be inline.

  // Return a prime no smaller than n.
  inline std::size_t
  _Prime_rehash_policy::
  _M_next_bkt(std::size_t __n) const
  {
    const unsigned long* __p = std::lower_bound(__prime_list, __prime_list
                                                + _S_n_primes, __n);
    _M_next_resize = static_cast<std::size_t>(std::ceil(*__p
                                                        * _M_max_load_factor));
    return *__p;
  }

  // Return the smallest prime p such that alpha p >= n, where alpha
  // is the load factor.
  inline std::size_t
  _Prime_rehash_policy::
  _M_bkt_for_elements(std::size_t __n) const
  {
    const float __min_bkts = __n / _M_max_load_factor;
    const unsigned long* __p = std::lower_bound(__prime_list, __prime_list
                                                + _S_n_primes, __min_bkts);
    _M_next_resize = static_cast<std::size_t>(std::ceil(*__p
                                                        * _M_max_load_factor));
    return *__p;
  }

  // Finds the smallest prime p such that alpha p > __n_elt + __n_ins.
  // If p > __n_bkt, return make_pair(true, p); otherwise return
  // make_pair(false, 0). In principle this isn't very different from
  // _M_bkt_for_elements.

  // The only tricky part is that we're caching the element count at
  // which we need to rehash, so we don't have to do a floating-point
  // multiply for every insertion.

  inline std::pair<bool, std::size_t>
  _Prime_rehash_policy::
  _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
                 std::size_t __n_ins) const
  {
    if (__n_elt + __n_ins > _M_next_resize)
      {
        float __min_bkts = ((float(__n_ins) + float(__n_elt))
                            / _M_max_load_factor);
        if (__min_bkts > __n_bkt)
          {
            __min_bkts = std::max(__min_bkts, _M_growth_factor * __n_bkt);
            const unsigned long* __p =
              std::lower_bound(__prime_list, __prime_list + _S_n_primes,
                               __min_bkts);
            _M_next_resize =
              static_cast<std::size_t>(std::ceil(*__p * _M_max_load_factor));
            return std::make_pair(true, *__p);
          }
        else
          {
            _M_next_resize =
              static_cast<std::size_t>(std::ceil(__n_bkt
                                                 * _M_max_load_factor));
            return std::make_pair(false, 0);
          }
      }
    else
      return std::make_pair(false, 0);
  }

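  // Illustrative walk-through (hypothetical numbers; the prime actually
  // chosen depends on the contents of __prime_list): with the default
  // _M_max_load_factor == 1.0 and _M_growth_factor == 2.0, a table of 97
  // buckets holding 97 elements that is about to insert one more element
  // computes
  //
  //   __min_bkts = (97 + 1) / 1.0 = 98 > 97        // must grow
  //   __min_bkts = max(98, 2.0 * 97) = 194         // grow geometrically
  //
  // and returns make_pair(true, p), where p is the first prime in
  // __prime_list not less than 194. Until the cached _M_next_resize
  // threshold is exceeded again, later insertions return
  // make_pair(false, 0) without any floating-point work.
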
  // Base classes for std::tr1::_Hashtable. We define these base
  // classes because in some cases we want to do different things
  // depending on the value of a policy class. In some cases the
  // policy class affects which member functions and nested typedefs
  // are defined; we handle that by specializing base class templates.
  // Several of the base class templates need to access other members
  // of class template _Hashtable, so we use the "curiously recurring
  // template pattern" for them.

  // class template _Map_base. If the hashtable has a value type of the
  // form pair<T1, T2> and a key extraction policy that returns the
  // first part of the pair, the hashtable gets a mapped_type typedef.
  // If it satisfies those criteria and also has unique keys, then it
  // also gets an operator[].
  template<typename _Key, typename _Value, typename _Ex, bool __unique,
           typename _Hashtable>
    struct _Map_base { };

  template<typename _Key, typename _Pair, typename _Hashtable>
    struct _Map_base<_Key, _Pair, std::_Select1st<_Pair>, false, _Hashtable>
    {
      typedef typename _Pair::second_type mapped_type;
    };

  template<typename _Key, typename _Pair, typename _Hashtable>
    struct _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>
    {
      typedef typename _Pair::second_type mapped_type;

      mapped_type&
      operator[](const _Key& __k);
    };

  template<typename _Key, typename _Pair, typename _Hashtable>
    typename _Map_base<_Key, _Pair, std::_Select1st<_Pair>,
                       true, _Hashtable>::mapped_type&
    _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>::
    operator[](const _Key& __k)
    {
      _Hashtable* __h = static_cast<_Hashtable*>(this);
      typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
      std::size_t __n = __h->_M_bucket_index(__k, __code,
                                             __h->_M_bucket_count);

      typename _Hashtable::_Node* __p =
        __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
      if (!__p)
        return __h->_M_insert_bucket(std::make_pair(__k, mapped_type()),
                                     __n, __code)->second;
      return (__p->_M_v).second;
    }

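  // Illustrative sketch in terms of the public TR1 container built on this
  // base (an example, not part of this header): std::tr1::unordered_map
  // has a pair<const Key, T> value type, _Select1st key extraction and
  // unique keys, so it inherits the operator[] defined above:
  //
  //   std::tr1::unordered_map<std::string, int> __m;
  //   __m["apple"] += 1;   // absent: inserts ("apple", int()), returns ref
  //   __m["apple"] += 1;   // present: no insertion; __m["apple"] == 2
  //
  // std::tr1::unordered_multimap does not have unique keys, matches only
  // the mapped_type specialization, and therefore gets no operator[].
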
  // class template _Rehash_base. Give hashtable the max_load_factor
  // functions iff the rehash policy is _Prime_rehash_policy.
  template<typename _RehashPolicy, typename _Hashtable>
    struct _Rehash_base { };

  template<typename _Hashtable>
    struct _Rehash_base<_Prime_rehash_policy, _Hashtable>
    {
      float
      max_load_factor() const
      {
        const _Hashtable* __this = static_cast<const _Hashtable*>(this);
        return __this->__rehash_policy().max_load_factor();
      }

      void
      max_load_factor(float __z)
      {
        _Hashtable* __this = static_cast<_Hashtable*>(this);
        __this->__rehash_policy(_Prime_rehash_policy(__z));
      }
    };

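  // Illustrative usage (an example, not part of this header): the TR1
  // unordered containers use _Prime_rehash_policy, so they inherit both
  // overloads above:
  //
  //   std::tr1::unordered_set<int> __s;
  //   float __lf = __s.max_load_factor();   // getter; 1.0 by default
  //   __s.max_load_factor(0.5f);            // setter: installs a new
  //                                         // _Prime_rehash_policy(0.5)
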
  // Class template _Hash_code_base. Encapsulates two policy issues that
  // aren't quite orthogonal.
  // (1) the difference between using a ranged hash function and using
  //     the combination of a hash function and a range-hashing function.
  //     In the former case we don't have such things as hash codes, so
  //     we have a dummy type as placeholder.
  // (2) Whether or not we cache hash codes. Caching hash codes is
  //     meaningless if we have a ranged hash function.
  // We also put the key extraction and equality comparison function
  // objects here, for convenience.

  // Primary template: unused except as a hook for specializations.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash,
           bool __cache_hash_code>
    struct _Hash_code_base;

  // Specialization: ranged hash function, no caching hash codes. H1
  // and H2 are provided but ignored. We define a dummy hash code type.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Hash, false>
    {
    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1&, const _H2&, const _Hash& __h)
      : _M_extract(__ex), _M_eq(__eq), _M_ranged_hash(__h) { }

      typedef void* _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __key) const
      { return 0; }

      std::size_t
      _M_bucket_index(const _Key& __k, _Hash_code_type,
                      std::size_t __n) const
      { return _M_ranged_hash(__k, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, false>* __p,
                      std::size_t __n) const
      { return _M_ranged_hash(_M_extract(__p->_M_v), __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type,
                 _Hash_node<_Value, false>* __n) const
      { return _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, false>*, _Hash_code_type) const
      { }

      void
      _M_copy_code(_Hash_node<_Value, false>*,
                   const _Hash_node<_Value, false>*) const
      { }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_ranged_hash, __x._M_ranged_hash);
      }

    protected:
      _ExtractKey _M_extract;
      _Equal      _M_eq;
      _Hash       _M_ranged_hash;
    };

  // No specialization for ranged hash function while caching hash codes.
  // That combination is meaningless, and trying to do it is an error.

  // Specialization: ranged hash function, cache hash codes. This
  // combination is meaningless, so we provide only a declaration
  // and no definition.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Hash, true>;

  // Specialization: hash function and range-hashing function, no
  // caching of hash codes. H is provided but ignored. Provides
  // typedef and accessor required by TR1.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Default_ranged_hash, false>
    {
      typedef _H1 hasher;

      hasher
      hash_function() const
      { return _M_h1; }

    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1& __h1, const _H2& __h2,
                      const _Default_ranged_hash&)
      : _M_extract(__ex), _M_eq(__eq), _M_h1(__h1), _M_h2(__h2) { }

      typedef std::size_t _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __k) const
      { return _M_h1(__k); }

      std::size_t
      _M_bucket_index(const _Key&, _Hash_code_type __c,
                      std::size_t __n) const
      { return _M_h2(__c, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, false>* __p,
                      std::size_t __n) const
      { return _M_h2(_M_h1(_M_extract(__p->_M_v)), __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type,
                 _Hash_node<_Value, false>* __n) const
      { return _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, false>*, _Hash_code_type) const
      { }

      void
      _M_copy_code(_Hash_node<_Value, false>*,
                   const _Hash_node<_Value, false>*) const
      { }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_h1, __x._M_h1);
        std::swap(_M_h2, __x._M_h2);
      }

    protected:
      _ExtractKey _M_extract;
      _Equal      _M_eq;
      _H1         _M_h1;
      _H2         _M_h2;
    };

  // Specialization: hash function and range-hashing function,
  // caching hash codes. H is provided but ignored. Provides
  // typedef and accessor required by TR1.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Default_ranged_hash, true>
    {
      typedef _H1 hasher;

      hasher
      hash_function() const
      { return _M_h1; }

    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1& __h1, const _H2& __h2,
                      const _Default_ranged_hash&)
      : _M_extract(__ex), _M_eq(__eq), _M_h1(__h1), _M_h2(__h2) { }

      typedef std::size_t _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __k) const
      { return _M_h1(__k); }

      std::size_t
      _M_bucket_index(const _Key&, _Hash_code_type __c,
                      std::size_t __n) const
      { return _M_h2(__c, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, true>* __p,
                      std::size_t __n) const
      { return _M_h2(__p->_M_hash_code, __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type __c,
                 _Hash_node<_Value, true>* __n) const
      { return __c == __n->_M_hash_code && _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, true>* __n, _Hash_code_type __c) const
      { __n->_M_hash_code = __c; }

      void
      _M_copy_code(_Hash_node<_Value, true>* __to,
                   const _Hash_node<_Value, true>* __from) const
      { __to->_M_hash_code = __from->_M_hash_code; }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_h1, __x._M_h1);
        std::swap(_M_h2, __x._M_h2);
      }

    protected:
      _ExtractKey _M_extract;
      _Equal      _M_eq;
      _H1         _M_h1;
      _H2         _M_h2;
    };
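
  // Illustrative note on the cached specialization above (a restatement,
  // not new behaviour): because each node stores its hash code, a lookup
  // walking a bucket can reject most non-matching entries with one integer
  // comparison and only falls back to the user-supplied equality predicate
  // (e.g. a full std::string comparison) when the codes already agree:
  //
  //   __c == __n->_M_hash_code && _M_eq(__k, _M_extract(__n->_M_v))
  //
  // This is the "performance win" referred to in the _Hash_node comment.
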
} // namespace __detail

_GLIBCXX_END_NAMESPACE_TR1
}