1 // hashtable.h header -*- C++ -*-
3 // Copyright (C) 2007-2020 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 /** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
31 #define _HASHTABLE_H 1
33 #pragma GCC system_header
35 #include <bits/hashtable_policy.h>
36 #if __cplusplus > 201402L
37 # include <bits/node_handle.h>
40 namespace std
_GLIBCXX_VISIBILITY(default)
42 _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Helper alias computing the default of the __cache_hash_code policy:
// cache the hash code unless the hasher is fast (__is_fast_hash) AND
// nothrow-invocable on a const key, in which case erase/swap can safely
// and cheaply recompute hash codes on demand.
// NOTE(review): the `using __cache_default` introducer (original line 45)
// is not visible in this extraction — confirm against upstream.
44 template<typename _Tp
, typename _Hash
>
46 = __not_
<__and_
<// Do not cache for fast hasher.
47 __is_fast_hash
<_Hash
>,
48 // Mandatory to have erase not throwing.
49 __is_nothrow_invocable
<const _Hash
&, const _Tp
&>>>;
52 * Primary class template _Hashtable.
54 * @ingroup hashtable-detail
56 * @tparam _Value CopyConstructible type.
58 * @tparam _Key CopyConstructible type.
60 * @tparam _Alloc An allocator type
61 * ([lib.allocator.requirements]) whose _Alloc::value_type is
62 * _Value. As a conforming extension, we allow for
63 * _Alloc::value_type != _Value.
65 * @tparam _ExtractKey Function object that takes an object of type
66 * _Value and returns a value of type _Key.
68 * @tparam _Equal Function object that takes two objects of type k
69 * and returns a bool-like value that is true if the two objects
70 * are considered equal.
72 * @tparam _H1 The hash function. A unary function object with
73 * argument type _Key and result type size_t. Return values should
74 * be distributed over the entire range [0, numeric_limits<size_t>::max()].
76 * @tparam _H2 The range-hashing function (in the terminology of
77 * Tavori and Dreizin). A binary function object whose argument
78 * types and result type are all size_t. Given arguments r and N,
79 * the return value is in the range [0, N).
81 * @tparam _Hash The ranged hash function (Tavori and Dreizin). A
82 * binary function whose argument types are _Key and size_t and
83 * whose result type is size_t. Given arguments k and N, the
84 * return value is in the range [0, N). Default: hash(k, N) =
85 * h2(h1(k), N). If _Hash is anything other than the default, _H1
86 * and _H2 are ignored.
88 * @tparam _RehashPolicy Policy class with three members, all of
89 * which govern the bucket count. _M_next_bkt(n) returns a bucket
90 * count no smaller than n. _M_bkt_for_elements(n) returns a
91 * bucket count appropriate for an element count of n.
92 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
93 * current bucket count is n_bkt and the current element count is
94 * n_elt, we need to increase the bucket count. If so, returns
95 * make_pair(true, n), where n is the new bucket count. If not,
96 * returns make_pair(false, <anything>)
98 * @tparam _Traits Compile-time class with three boolean
99 * std::integral_constant members: __cache_hash_code, __constant_iterators,
102 * Each _Hashtable data structure has:
104 * - _Bucket[] _M_buckets
105 * - _Hash_node_base _M_before_begin
106 * - size_type _M_bucket_count
107 * - size_type _M_element_count
109 * with _Bucket being _Hash_node* and _Hash_node containing:
111 * - _Hash_node* _M_next
113 * - size_t _M_hash_code if cache_hash_code is true
115 * In terms of Standard containers the hashtable is like the aggregation of:
117 * - std::forward_list<_Node> containing the elements
118 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
120 * The non-empty buckets contain the node before the first node in the
121 * bucket. This design makes it possible to implement something like a
122 * std::forward_list::insert_after on container insertion and
123 * std::forward_list::erase_after on container erase
124 * calls. _M_before_begin is equivalent to
125 * std::forward_list::before_begin. Empty buckets contain
126 * nullptr. Note that one of the non-empty buckets contains
127 * &_M_before_begin which is not a dereferenceable node so the
128 * node pointer in a bucket shall never be dereferenced, only its
131 * Walking through a bucket's nodes requires a check on the hash code to
132 * see if each node is still in the bucket. Such a design assumes a
133 * quite efficient hash functor and is one of the reasons it is
134 * highly advisable to set __cache_hash_code to true.
136 * The container iterators are simply built from nodes. This way
137 * incrementing the iterator is perfectly efficient independent of
138 * how many empty buckets there are in the container.
140 * On insert we compute the element's hash code and use it to find the
141 * bucket index. If the element must be inserted in an empty bucket
142 * we add it at the beginning of the singly linked list and make the
143 * bucket point to _M_before_begin. The bucket that used to point to
144 * _M_before_begin, if any, is updated to point to its new before
147 * On erase, the simple iterator design requires using the hash
148 * functor to get the index of the bucket to update. For this
149 * reason, when __cache_hash_code is set to false the hash functor must
150 * not throw and this is enforced by a static assertion.
152 * Functionality is implemented by decomposition into base classes,
153 * where the derived _Hashtable class is used in _Map_base,
154 * _Insert, _Rehash_base, and _Equality base classes to access the
155 * "this" pointer. _Hashtable_base is used in the base classes as a
156 * non-recursive, fully-completed-type so that detailed nested type
157 * information, such as iterator type and node type, can be
158 * used. This is similar to the "Curiously Recurring Template
159 * Pattern" (CRTP) technique, but uses a reconstructed, not
160 * explicitly passed, template pattern.
162 * Base class templates are:
163 * - __detail::_Hashtable_base
164 * - __detail::_Map_base
165 * - __detail::_Insert
166 * - __detail::_Rehash_base
167 * - __detail::_Equality
169 template<typename _Key
, typename _Value
, typename _Alloc
,
170 typename _ExtractKey
, typename _Equal
,
171 typename _H1
, typename _H2
, typename _Hash
,
172 typename _RehashPolicy
, typename _Traits
>
174 : public __detail::_Hashtable_base
<_Key
, _Value
, _ExtractKey
, _Equal
,
175 _H1
, _H2
, _Hash
, _Traits
>,
176 public __detail::_Map_base
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
177 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>,
178 public __detail::_Insert
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
179 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>,
180 public __detail::_Rehash_base
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
181 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>,
182 public __detail::_Equality
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
183 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>,
184 private __detail::_Hashtable_alloc
<
185 __alloc_rebind
<_Alloc
,
186 __detail::_Hash_node
<_Value
,
187 _Traits::__hash_cached::value
>>>
189 static_assert(is_same
<typename remove_cv
<_Value
>::type
, _Value
>::value
,
190 "unordered container must have a non-const, non-volatile value_type");
191 #if __cplusplus > 201703L || defined __STRICT_ANSI__
192 static_assert(is_same
<typename
_Alloc::value_type
, _Value
>{},
193 "unordered container must have the same value_type as its allocator");
196 using __traits_type
= _Traits
;
197 using __hash_cached
= typename
__traits_type::__hash_cached
;
198 using __node_type
= __detail::_Hash_node
<_Value
, __hash_cached::value
>;
199 using __node_alloc_type
= __alloc_rebind
<_Alloc
, __node_type
>;
201 using __hashtable_alloc
= __detail::_Hashtable_alloc
<__node_alloc_type
>;
203 using __value_alloc_traits
=
204 typename
__hashtable_alloc::__value_alloc_traits
;
205 using __node_alloc_traits
=
206 typename
__hashtable_alloc::__node_alloc_traits
;
207 using __node_base
= typename
__hashtable_alloc::__node_base
;
208 using __bucket_type
= typename
__hashtable_alloc::__bucket_type
;
211 typedef _Key key_type
;
212 typedef _Value value_type
;
213 typedef _Alloc allocator_type
;
214 typedef _Equal key_equal
;
216 // mapped_type, if present, comes from _Map_base.
217 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
218 typedef typename
__value_alloc_traits::pointer pointer
;
219 typedef typename
__value_alloc_traits::const_pointer const_pointer
;
220 typedef value_type
& reference
;
221 typedef const value_type
& const_reference
;
224 using __rehash_type
= _RehashPolicy
;
225 using __rehash_state
= typename
__rehash_type::_State
;
227 using __constant_iterators
= typename
__traits_type::__constant_iterators
;
228 using __unique_keys
= typename
__traits_type::__unique_keys
;
230 using __key_extract
= typename
std::conditional
<
231 __constant_iterators::value
,
233 __detail::_Select1st
>::type
;
235 using __hashtable_base
= __detail::
236 _Hashtable_base
<_Key
, _Value
, _ExtractKey
,
237 _Equal
, _H1
, _H2
, _Hash
, _Traits
>;
239 using __hash_code_base
= typename
__hashtable_base::__hash_code_base
;
240 using __hash_code
= typename
__hashtable_base::__hash_code
;
241 using __ireturn_type
= typename
__hashtable_base::__ireturn_type
;
243 using __map_base
= __detail::_Map_base
<_Key
, _Value
, _Alloc
, _ExtractKey
,
244 _Equal
, _H1
, _H2
, _Hash
,
245 _RehashPolicy
, _Traits
>;
247 using __rehash_base
= __detail::_Rehash_base
<_Key
, _Value
, _Alloc
,
250 _RehashPolicy
, _Traits
>;
252 using __eq_base
= __detail::_Equality
<_Key
, _Value
, _Alloc
, _ExtractKey
,
253 _Equal
, _H1
, _H2
, _Hash
,
254 _RehashPolicy
, _Traits
>;
256 using __reuse_or_alloc_node_gen_t
=
257 __detail::_ReuseOrAllocNode
<__node_alloc_type
>;
258 using __alloc_node_gen_t
=
259 __detail::_AllocNode
<__node_alloc_type
>;
261 // Simple RAII type for managing a node containing an element
// Owns a __node_type until ownership is released (by nulling _M_node);
// the destructor deallocates any node still owned, giving exception
// safety to insert/emplace paths.
// NOTE(review): the `struct _Scoped_node {` opener (original lines
// 262-263) is not visible in this extraction — confirm against upstream.
264 // Take ownership of a node with a constructed element.
265 _Scoped_node(__node_type
* __n
, __hashtable_alloc
* __h
)
266 : _M_h(__h
), _M_node(__n
) { }
268 // Allocate a node and construct an element within it.
269 template<typename
... _Args
>
270 _Scoped_node(__hashtable_alloc
* __h
, _Args
&&... __args
)
// NOTE(review): the mem-initializer `: _M_h(__h),` (original line 271)
// and the empty body `{ }` (line 273) are not visible here — confirm
// against upstream.
272 _M_node(__h
->_M_allocate_node(std::forward
<_Args
>(__args
)...))
275 // Destroy element and deallocate node.
// Deallocates only if ownership was not released (_M_node still non-null).
276 ~_Scoped_node() { if (_M_node
) _M_h
->_M_deallocate_node(_M_node
); };
// Non-copyable: exclusive ownership semantics.
278 _Scoped_node(const _Scoped_node
&) = delete;
279 _Scoped_node
& operator=(const _Scoped_node
&) = delete;
// Allocator wrapper used to deallocate _M_node.
281 __hashtable_alloc
* _M_h
;
// Owned node; nullptr once ownership has been transferred.
282 __node_type
* _M_node
;
// Forward a stored value either as a const lvalue (when assigning from an
// lvalue _Hashtable, i.e. copying elements) or as an rvalue (when
// assigning from an rvalue _Hashtable, i.e. moving elements). Used by the
// element-assignment helpers.
// NOTE(review): the `static constexpr` specifiers (original line 286) are
// not visible in this extraction — confirm against upstream.
285 template<typename _Ht
>
287 typename conditional
<std::is_lvalue_reference
<_Ht
>::value
,
288 const value_type
&, value_type
&&>::type
289 __fwd_value_for(value_type
& __val
) noexcept
290 { return std::move(__val
); }
292 // Metaprogramming for picking apart hash caching.
293 template<typename _Cond
>
294 using __if_hash_cached
= __or_
<__not_
<__hash_cached
>, _Cond
>;
296 template<typename _Cond
>
297 using __if_hash_not_cached
= __or_
<__hash_cached
, _Cond
>;
299 // Compile-time diagnostics.
301 // _Hash_code_base has everything protected, so use this derived type to
303 struct __hash_code_base_access
: __hash_code_base
304 { using __hash_code_base::_M_bucket_index
; };
306 // Getting a bucket index from a node shall not throw because it is used
307 // in methods (erase, swap...) that shall not throw.
308 static_assert(noexcept(declval
<const __hash_code_base_access
&>()
309 ._M_bucket_index((const __node_type
*)nullptr,
311 "Cache the hash code or qualify your functors involved"
312 " in hash code and bucket index computation with noexcept");
314 // When hash codes are cached local iterator inherits from H2 functor
315 // which must then be default constructible.
316 static_assert(__if_hash_cached
<is_default_constructible
<_H2
>>::value
,
317 "Functor used to map hash code to bucket index"
318 " must be default constructible");
320 template<typename _Keya
, typename _Valuea
, typename _Alloca
,
321 typename _ExtractKeya
, typename _Equala
,
322 typename _H1a
, typename _H2a
, typename _Hasha
,
323 typename _RehashPolicya
, typename _Traitsa
,
325 friend struct __detail::_Map_base
;
327 template<typename _Keya
, typename _Valuea
, typename _Alloca
,
328 typename _ExtractKeya
, typename _Equala
,
329 typename _H1a
, typename _H2a
, typename _Hasha
,
330 typename _RehashPolicya
, typename _Traitsa
>
331 friend struct __detail::_Insert_base
;
333 template<typename _Keya
, typename _Valuea
, typename _Alloca
,
334 typename _ExtractKeya
, typename _Equala
,
335 typename _H1a
, typename _H2a
, typename _Hasha
,
336 typename _RehashPolicya
, typename _Traitsa
,
337 bool _Constant_iteratorsa
>
338 friend struct __detail::_Insert
;
340 template<typename _Keya
, typename _Valuea
, typename _Alloca
,
341 typename _ExtractKeya
, typename _Equala
,
342 typename _H1a
, typename _H2a
, typename _Hasha
,
343 typename _RehashPolicya
, typename _Traitsa
,
345 friend struct __detail::_Equality
;
348 using size_type
= typename
__hashtable_base::size_type
;
349 using difference_type
= typename
__hashtable_base::difference_type
;
351 using iterator
= typename
__hashtable_base::iterator
;
352 using const_iterator
= typename
__hashtable_base::const_iterator
;
354 using local_iterator
= typename
__hashtable_base::local_iterator
;
355 using const_local_iterator
= typename
__hashtable_base::
356 const_local_iterator
;
358 #if __cplusplus > 201402L
359 using node_type
= _Node_handle
<_Key
, _Value
, __node_alloc_type
>;
360 using insert_return_type
= _Node_insert_return
<iterator
, node_type
>;
364 __bucket_type
* _M_buckets
= &_M_single_bucket
;
365 size_type _M_bucket_count
= 1;
366 __node_base _M_before_begin
;
367 size_type _M_element_count
= 0;
368 _RehashPolicy _M_rehash_policy
;
370 // A single bucket used when only need for 1 bucket. Especially
371 // interesting in move semantic to leave hashtable with only 1 bucket
372 // which is not allocated so that we can have those operations noexcept
374 // Note that we can't leave hashtable with 0 bucket without adding
375 // numerous checks in the code to avoid 0 modulus.
376 __bucket_type _M_single_bucket
= nullptr;
379 _M_uses_single_bucket(__bucket_type
* __bkts
) const
380 { return __builtin_expect(__bkts
== &_M_single_bucket
, false); }
383 _M_uses_single_bucket() const
384 { return _M_uses_single_bucket(_M_buckets
); }
387 _M_base_alloc() { return *this; }
390 _M_allocate_buckets(size_type __bkt_count
)
392 if (__builtin_expect(__bkt_count
== 1, false))
394 _M_single_bucket
= nullptr;
395 return &_M_single_bucket
;
398 return __hashtable_alloc::_M_allocate_buckets(__bkt_count
);
402 _M_deallocate_buckets(__bucket_type
* __bkts
, size_type __bkt_count
)
404 if (_M_uses_single_bucket(__bkts
))
407 __hashtable_alloc::_M_deallocate_buckets(__bkts
, __bkt_count
);
411 _M_deallocate_buckets()
412 { _M_deallocate_buckets(_M_buckets
, _M_bucket_count
); }
414 // Gets bucket begin, deals with the fact that non-empty buckets contain
415 // their before begin node.
417 _M_bucket_begin(size_type __bkt
) const;
421 { return static_cast<__node_type
*>(_M_before_begin
._M_nxt
); }
423 // Assign *this using another _Hashtable instance. Whether elements
424 // are copied or moved depends on the _Ht reference.
425 template<typename _Ht
>
427 _M_assign_elements(_Ht
&&);
429 template<typename _Ht
, typename _NodeGenerator
>
431 _M_assign(_Ht
&&, const _NodeGenerator
&);
434 _M_move_assign(_Hashtable
&&, true_type
);
437 _M_move_assign(_Hashtable
&&, false_type
);
442 _Hashtable(const _H1
& __h1
, const _H2
& __h2
, const _Hash
& __h
,
443 const _Equal
& __eq
, const _ExtractKey
& __exk
,
444 const allocator_type
& __a
)
445 : __hashtable_base(__exk
, __h1
, __h2
, __h
, __eq
),
446 __hashtable_alloc(__node_alloc_type(__a
))
450 // Constructor, destructor, assignment, swap
451 _Hashtable() = default;
452 _Hashtable(size_type __bkt_count_hint
,
453 const _H1
&, const _H2
&, const _Hash
&,
454 const _Equal
&, const _ExtractKey
&,
455 const allocator_type
&);
457 template<typename _InputIterator
>
458 _Hashtable(_InputIterator __first
, _InputIterator __last
,
459 size_type __bkt_count_hint
,
460 const _H1
&, const _H2
&, const _Hash
&,
461 const _Equal
&, const _ExtractKey
&,
462 const allocator_type
&);
464 _Hashtable(const _Hashtable
&);
466 _Hashtable(_Hashtable
&&) noexcept
;
468 _Hashtable(const _Hashtable
&, const allocator_type
&);
470 _Hashtable(_Hashtable
&&, const allocator_type
&);
472 // Use delegating constructors.
474 _Hashtable(const allocator_type
& __a
)
475 : __hashtable_alloc(__node_alloc_type(__a
))
479 _Hashtable(size_type __bkt_count_hint
,
480 const _H1
& __hf
= _H1(),
481 const key_equal
& __eql
= key_equal(),
482 const allocator_type
& __a
= allocator_type())
483 : _Hashtable(__bkt_count_hint
, __hf
, _H2(), _Hash(), __eql
,
484 __key_extract(), __a
)
487 template<typename _InputIterator
>
488 _Hashtable(_InputIterator __f
, _InputIterator __l
,
489 size_type __bkt_count_hint
= 0,
490 const _H1
& __hf
= _H1(),
491 const key_equal
& __eql
= key_equal(),
492 const allocator_type
& __a
= allocator_type())
493 : _Hashtable(__f
, __l
, __bkt_count_hint
, __hf
, _H2(), _Hash(), __eql
,
494 __key_extract(), __a
)
497 _Hashtable(initializer_list
<value_type
> __l
,
498 size_type __bkt_count_hint
= 0,
499 const _H1
& __hf
= _H1(),
500 const key_equal
& __eql
= key_equal(),
501 const allocator_type
& __a
= allocator_type())
502 : _Hashtable(__l
.begin(), __l
.end(), __bkt_count_hint
,
503 __hf
, _H2(), _Hash(), __eql
,
504 __key_extract(), __a
)
508 operator=(const _Hashtable
& __ht
);
511 operator=(_Hashtable
&& __ht
)
512 noexcept(__node_alloc_traits::_S_nothrow_move()
513 && is_nothrow_move_assignable
<_H1
>::value
514 && is_nothrow_move_assignable
<_Equal
>::value
)
516 constexpr bool __move_storage
=
517 __node_alloc_traits::_S_propagate_on_move_assign()
518 || __node_alloc_traits::_S_always_equal();
519 _M_move_assign(std::move(__ht
), __bool_constant
<__move_storage
>());
524 operator=(initializer_list
<value_type
> __l
)
526 __reuse_or_alloc_node_gen_t
__roan(_M_begin(), *this);
527 _M_before_begin
._M_nxt
= nullptr;
529 this->_M_insert_range(__l
.begin(), __l
.end(), __roan
, __unique_keys());
533 ~_Hashtable() noexcept
;
537 noexcept(__and_
<__is_nothrow_swappable
<_H1
>,
538 __is_nothrow_swappable
<_Equal
>>::value
);
540 // Basic container operations
543 { return iterator(_M_begin()); }
546 begin() const noexcept
547 { return const_iterator(_M_begin()); }
551 { return iterator(nullptr); }
555 { return const_iterator(nullptr); }
558 cbegin() const noexcept
559 { return const_iterator(_M_begin()); }
562 cend() const noexcept
563 { return const_iterator(nullptr); }
566 size() const noexcept
567 { return _M_element_count
; }
569 _GLIBCXX_NODISCARD
bool
570 empty() const noexcept
571 { return size() == 0; }
574 get_allocator() const noexcept
575 { return allocator_type(this->_M_node_allocator()); }
578 max_size() const noexcept
579 { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
584 { return this->_M_eq(); }
586 // hash_function, if present, comes from _Hash_code_base.
590 bucket_count() const noexcept
591 { return _M_bucket_count
; }
594 max_bucket_count() const noexcept
595 { return max_size(); }
598 bucket_size(size_type __bkt
) const
599 { return std::distance(begin(__bkt
), end(__bkt
)); }
602 bucket(const key_type
& __k
) const
603 { return _M_bucket_index(__k
, this->_M_hash_code(__k
)); }
606 begin(size_type __bkt
)
608 return local_iterator(*this, _M_bucket_begin(__bkt
),
609 __bkt
, _M_bucket_count
);
614 { return local_iterator(*this, nullptr, __bkt
, _M_bucket_count
); }
617 begin(size_type __bkt
) const
619 return const_local_iterator(*this, _M_bucket_begin(__bkt
),
620 __bkt
, _M_bucket_count
);
624 end(size_type __bkt
) const
625 { return const_local_iterator(*this, nullptr, __bkt
, _M_bucket_count
); }
629 cbegin(size_type __bkt
) const
631 return const_local_iterator(*this, _M_bucket_begin(__bkt
),
632 __bkt
, _M_bucket_count
);
636 cend(size_type __bkt
) const
637 { return const_local_iterator(*this, nullptr, __bkt
, _M_bucket_count
); }
640 load_factor() const noexcept
642 return static_cast<float>(size()) / static_cast<float>(bucket_count());
645 // max_load_factor, if present, comes from _Rehash_base.
647 // Generalization of max_load_factor. Extension, not found in
648 // TR1. Only useful if _RehashPolicy is something other than
651 __rehash_policy() const
652 { return _M_rehash_policy
; }
655 __rehash_policy(const _RehashPolicy
& __pol
)
656 { _M_rehash_policy
= __pol
; }
660 find(const key_type
& __k
);
663 find(const key_type
& __k
) const;
666 count(const key_type
& __k
) const;
668 std::pair
<iterator
, iterator
>
669 equal_range(const key_type
& __k
);
671 std::pair
<const_iterator
, const_iterator
>
672 equal_range(const key_type
& __k
) const;
675 // Bucket index computation helpers.
677 _M_bucket_index(__node_type
* __n
) const noexcept
678 { return __hash_code_base::_M_bucket_index(__n
, _M_bucket_count
); }
681 _M_bucket_index(const key_type
& __k
, __hash_code __c
) const
682 { return __hash_code_base::_M_bucket_index(__k
, __c
, _M_bucket_count
); }
684 // Find and insert helper functions and types
685 // Find the node before the one matching the criteria.
687 _M_find_before_node(size_type
, const key_type
&, __hash_code
) const;
690 _M_find_node(size_type __bkt
, const key_type
& __key
,
691 __hash_code __c
) const
693 __node_base
* __before_n
= _M_find_before_node(__bkt
, __key
, __c
);
695 return static_cast<__node_type
*>(__before_n
->_M_nxt
);
699 // Insert a node at the beginning of a bucket.
701 _M_insert_bucket_begin(size_type
, __node_type
*);
703 // Remove the bucket first node
705 _M_remove_bucket_begin(size_type __bkt
, __node_type
* __next_n
,
706 size_type __next_bkt
);
708 // Get the node before __n in the bucket __bkt
710 _M_get_previous_node(size_type __bkt
, __node_base
* __n
);
712 // Insert node __n with key __k and hash code __code, in bucket __bkt
713 // if no rehash (assumes no element with same key already present).
714 // Takes ownership of __n if insertion succeeds, throws otherwise.
716 _M_insert_unique_node(const key_type
& __k
, size_type __bkt
,
717 __hash_code __code
, __node_type
* __n
,
718 size_type __n_elt
= 1);
720 // Insert node __n with key __k and hash code __code.
721 // Takes ownership of __n if insertion succeeds, throws otherwise.
723 _M_insert_multi_node(__node_type
* __hint
, const key_type
& __k
,
724 __hash_code __code
, __node_type
* __n
);
726 template<typename
... _Args
>
727 std::pair
<iterator
, bool>
728 _M_emplace(true_type
, _Args
&&... __args
);
730 template<typename
... _Args
>
732 _M_emplace(false_type __uk
, _Args
&&... __args
)
733 { return _M_emplace(cend(), __uk
, std::forward
<_Args
>(__args
)...); }
735 // Emplace with hint, useless when keys are unique.
736 template<typename
... _Args
>
738 _M_emplace(const_iterator
, true_type __uk
, _Args
&&... __args
)
739 { return _M_emplace(__uk
, std::forward
<_Args
>(__args
)...).first
; }
741 template<typename
... _Args
>
743 _M_emplace(const_iterator
, false_type
, _Args
&&... __args
);
745 template<typename _Arg
, typename _NodeGenerator
>
746 std::pair
<iterator
, bool>
747 _M_insert(_Arg
&&, const _NodeGenerator
&, true_type
, size_type
= 1);
749 template<typename _Arg
, typename _NodeGenerator
>
751 _M_insert(_Arg
&& __arg
, const _NodeGenerator
& __node_gen
,
754 return _M_insert(cend(), std::forward
<_Arg
>(__arg
), __node_gen
,
758 // Insert with hint, not used when keys are unique.
759 template<typename _Arg
, typename _NodeGenerator
>
761 _M_insert(const_iterator
, _Arg
&& __arg
,
762 const _NodeGenerator
& __node_gen
, true_type __uk
)
765 _M_insert(std::forward
<_Arg
>(__arg
), __node_gen
, __uk
).first
;
768 // Insert with hint when keys are not unique.
769 template<typename _Arg
, typename _NodeGenerator
>
771 _M_insert(const_iterator
, _Arg
&&,
772 const _NodeGenerator
&, false_type
);
775 _M_erase(true_type
, const key_type
&);
778 _M_erase(false_type
, const key_type
&);
781 _M_erase(size_type __bkt
, __node_base
* __prev_n
, __node_type
* __n
);
785 template<typename
... _Args
>
787 emplace(_Args
&&... __args
)
788 { return _M_emplace(__unique_keys(), std::forward
<_Args
>(__args
)...); }
790 template<typename
... _Args
>
792 emplace_hint(const_iterator __hint
, _Args
&&... __args
)
794 return _M_emplace(__hint
, __unique_keys(),
795 std::forward
<_Args
>(__args
)...);
798 // Insert member functions via inheritance.
802 erase(const_iterator
);
807 { return erase(const_iterator(__it
)); }
810 erase(const key_type
& __k
)
811 { return _M_erase(__unique_keys(), __k
); }
814 erase(const_iterator
, const_iterator
);
819 // Set number of buckets keeping it appropriate for container's number
821 void rehash(size_type __bkt_count
);
824 // reserve, if present, comes from _Rehash_base.
826 #if __cplusplus > 201402L
827 /// Re-insert an extracted node into a container with unique keys.
829 _M_reinsert_node(node_type
&& __nh
)
831 insert_return_type __ret
;
833 __ret
.position
= end();
836 __glibcxx_assert(get_allocator() == __nh
.get_allocator());
838 const key_type
& __k
= __nh
._M_key();
839 __hash_code __code
= this->_M_hash_code(__k
);
840 size_type __bkt
= _M_bucket_index(__k
, __code
);
841 if (__node_type
* __n
= _M_find_node(__bkt
, __k
, __code
))
843 __ret
.node
= std::move(__nh
);
844 __ret
.position
= iterator(__n
);
845 __ret
.inserted
= false;
850 = _M_insert_unique_node(__k
, __bkt
, __code
, __nh
._M_ptr
);
851 __nh
._M_ptr
= nullptr;
852 __ret
.inserted
= true;
858 /// Re-insert an extracted node into a container with equivalent keys.
860 _M_reinsert_node_multi(const_iterator __hint
, node_type
&& __nh
)
865 __glibcxx_assert(get_allocator() == __nh
.get_allocator());
867 const key_type
& __k
= __nh
._M_key();
868 auto __code
= this->_M_hash_code(__k
);
870 = _M_insert_multi_node(__hint
._M_cur
, __k
, __code
, __nh
._M_ptr
);
871 __nh
._M_ptr
= nullptr;
// Unlink the node after __prev_n (which lives in bucket __bkt) from the
// singly linked list, repair the affected bucket pointers, and return the
// node wrapped in a node_type handle that owns it.
877 _M_extract_node(size_t __bkt
, __node_base
* __prev_n
)
879 __node_type
* __n
= static_cast<__node_type
*>(__prev_n
->_M_nxt
);
// If __n is the bucket's first node, let _M_remove_bucket_begin repair
// the bucket entry (and the next bucket's entry when __n's successor
// starts a different bucket).
880 if (__prev_n
== _M_buckets
[__bkt
])
881 _M_remove_bucket_begin(__bkt
, __n
->_M_next(),
882 __n
->_M_nxt
? _M_bucket_index(__n
->_M_next()) : 0);
// Otherwise, if __n's successor begins another bucket, that bucket must
// now point at __prev_n (buckets store the node *before* their first).
883 else if (__n
->_M_nxt
)
885 size_type __next_bkt
= _M_bucket_index(__n
->_M_next());
886 if (__next_bkt
!= __bkt
)
887 _M_buckets
[__next_bkt
] = __prev_n
;
// Splice __n out of the list and detach it.
890 __prev_n
->_M_nxt
= __n
->_M_nxt
;
891 __n
->_M_nxt
= nullptr;
// NOTE(review): upstream decrements _M_element_count here (original line
// 892); it is not visible in this extraction — confirm against upstream.
893 return { __n
, this->_M_node_allocator() };
899 extract(const_iterator __pos
)
901 size_t __bkt
= _M_bucket_index(__pos
._M_cur
);
902 return _M_extract_node(__bkt
,
903 _M_get_previous_node(__bkt
, __pos
._M_cur
));
908 extract(const _Key
& __k
)
911 __hash_code __code
= this->_M_hash_code(__k
);
912 std::size_t __bkt
= _M_bucket_index(__k
, __code
);
913 if (__node_base
* __prev_node
= _M_find_before_node(__bkt
, __k
, __code
))
914 __nh
= _M_extract_node(__bkt
, __prev_node
);
918 /// Merge from a compatible container into one with unique keys.
// Moves each element of __src whose key is not already present into *this
// via node handles (no element copies); elements with duplicate keys stay
// in __src. __n_elt passes a shrinking size hint to the rehash policy so
// one rehash can cover the whole merge.
919 template<typename _Compatible_Hashtable
>
921 _M_merge_unique(_Compatible_Hashtable
& __src
) noexcept
923 static_assert(is_same_v
<typename
_Compatible_Hashtable::node_type
,
924 node_type
>, "Node types are compatible");
925 __glibcxx_assert(get_allocator() == __src
.get_allocator());
927 auto __n_elt
= __src
.size();
928 for (auto __i
= __src
.begin(), __end
= __src
.end(); __i
!= __end
;)
// NOTE(review): upstream advances the iterator first (`auto __pos =
// __i++;`, original line 930) so extraction cannot invalidate __i; that
// line is not visible in this extraction — confirm against upstream.
931 const key_type
& __k
= this->_M_extract()(*__pos
);
932 __hash_code __code
= this->_M_hash_code(__k
);
933 size_type __bkt
= _M_bucket_index(__k
, __code
);
// Insert only when the key is absent; on success the source's node handle
// releases ownership (pointer nulled) so it will not deallocate the node.
934 if (_M_find_node(__bkt
, __k
, __code
) == nullptr)
936 auto __nh
= __src
.extract(__pos
);
937 _M_insert_unique_node(__k
, __bkt
, __code
, __nh
._M_ptr
,
939 __nh
._M_ptr
= nullptr;
942 else if (__n_elt
!= 1)
947 /// Merge from a compatible container into one with equivalent keys.
// Moves every element of __src into *this via node handles (no element
// copies); duplicates are allowed, so __src is always drained.
948 template<typename _Compatible_Hashtable
>
950 _M_merge_multi(_Compatible_Hashtable
& __src
) noexcept
952 static_assert(is_same_v
<typename
_Compatible_Hashtable::node_type
,
953 node_type
>, "Node types are compatible");
954 __glibcxx_assert(get_allocator() == __src
.get_allocator());
// Reserve up front so the loop does not rehash per insertion.
956 this->reserve(size() + __src
.size());
// Post-increment inside extract's argument keeps __i valid: it is
// advanced before the node is unlinked from __src.
957 for (auto __i
= __src
.begin(), __end
= __src
.end(); __i
!= __end
;)
958 _M_reinsert_node_multi(cend(), __src
.extract(__i
++));
963 // Helper rehash method used when keys are unique.
964 void _M_rehash_aux(size_type __bkt_count
, true_type
);
966 // Helper rehash method used when keys can be non-unique.
967 void _M_rehash_aux(size_type __bkt_count
, false_type
);
969 // Unconditionally change size of bucket array to n, restore
970 // hash policy state to __state on exception.
971 void _M_rehash(size_type __bkt_count
, const __rehash_state
& __state
);
975 // Definitions of class template _Hashtable's out-of-line member functions.
// Out-of-line definition: return the first node of bucket __bkt, or
// nullptr if the bucket is empty. Buckets store the node *before* their
// first node, hence the _M_nxt dereference.
// NOTE(review): original lines 979-980 (`typename _Traits>` and `auto`)
// and the trailing-return/brace lines (984-985) are not visible in this
// extraction — confirm against upstream.
976 template<typename _Key
, typename _Value
,
977 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
978 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
981 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
982 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
983 _M_bucket_begin(size_type __bkt
) const
986 __node_base
* __n
= _M_buckets
[__bkt
];
987 return __n
? static_cast<__node_type
*>(__n
->_M_nxt
) : nullptr;
990 template<typename _Key
, typename _Value
,
991 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
992 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
994 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
995 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
996 _Hashtable(size_type __bkt_count_hint
,
997 const _H1
& __h1
, const _H2
& __h2
, const _Hash
& __h
,
998 const _Equal
& __eq
, const _ExtractKey
& __exk
,
999 const allocator_type
& __a
)
1000 : _Hashtable(__h1
, __h2
, __h
, __eq
, __exk
, __a
)
1002 auto __bkt_count
= _M_rehash_policy
._M_next_bkt(__bkt_count_hint
);
1003 if (__bkt_count
> _M_bucket_count
)
1005 _M_buckets
= _M_allocate_buckets(__bkt_count
);
1006 _M_bucket_count
= __bkt_count
;
1010 template<typename _Key
, typename _Value
,
1011 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1012 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1014 template<typename _InputIterator
>
1015 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1016 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1017 _Hashtable(_InputIterator __f
, _InputIterator __l
,
1018 size_type __bkt_count_hint
,
1019 const _H1
& __h1
, const _H2
& __h2
, const _Hash
& __h
,
1020 const _Equal
& __eq
, const _ExtractKey
& __exk
,
1021 const allocator_type
& __a
)
1022 : _Hashtable(__h1
, __h2
, __h
, __eq
, __exk
, __a
)
1024 auto __nb_elems
= __detail::__distance_fw(__f
, __l
);
1026 _M_rehash_policy
._M_next_bkt(
1027 std::max(_M_rehash_policy
._M_bkt_for_elements(__nb_elems
),
1030 if (__bkt_count
> _M_bucket_count
)
1032 _M_buckets
= _M_allocate_buckets(__bkt_count
);
1033 _M_bucket_count
= __bkt_count
;
1036 for (; __f
!= __l
; ++__f
)
1040 template<typename _Key
, typename _Value
,
1041 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1042 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1045 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1046 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1047 operator=(const _Hashtable
& __ht
)
1053 if (__node_alloc_traits::_S_propagate_on_copy_assign())
1055 auto& __this_alloc
= this->_M_node_allocator();
1056 auto& __that_alloc
= __ht
._M_node_allocator();
1057 if (!__node_alloc_traits::_S_always_equal()
1058 && __this_alloc
!= __that_alloc
)
1060 // Replacement allocator cannot free existing storage.
1061 this->_M_deallocate_nodes(_M_begin());
1062 _M_before_begin
._M_nxt
= nullptr;
1063 _M_deallocate_buckets();
1064 _M_buckets
= nullptr;
1065 std::__alloc_on_copy(__this_alloc
, __that_alloc
);
1066 __hashtable_base::operator=(__ht
);
1067 _M_bucket_count
= __ht
._M_bucket_count
;
1068 _M_element_count
= __ht
._M_element_count
;
1069 _M_rehash_policy
= __ht
._M_rehash_policy
;
1070 __alloc_node_gen_t
__alloc_node_gen(*this);
1073 _M_assign(__ht
, __alloc_node_gen
);
1077 // _M_assign took care of deallocating all memory. Now we
1078 // must make sure this instance remains in a usable state.
1080 __throw_exception_again
;
1084 std::__alloc_on_copy(__this_alloc
, __that_alloc
);
1087 // Reuse allocated buckets and nodes.
1088 _M_assign_elements(__ht
);
1092 template<typename _Key
, typename _Value
,
1093 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1094 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1096 template<typename _Ht
>
1098 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1099 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1100 _M_assign_elements(_Ht
&& __ht
)
1102 __bucket_type
* __former_buckets
= nullptr;
1103 std::size_t __former_bucket_count
= _M_bucket_count
;
1104 const __rehash_state
& __former_state
= _M_rehash_policy
._M_state();
1106 if (_M_bucket_count
!= __ht
._M_bucket_count
)
1108 __former_buckets
= _M_buckets
;
1109 _M_buckets
= _M_allocate_buckets(__ht
._M_bucket_count
);
1110 _M_bucket_count
= __ht
._M_bucket_count
;
1113 __builtin_memset(_M_buckets
, 0,
1114 _M_bucket_count
* sizeof(__bucket_type
));
1118 __hashtable_base::operator=(std::forward
<_Ht
>(__ht
));
1119 _M_element_count
= __ht
._M_element_count
;
1120 _M_rehash_policy
= __ht
._M_rehash_policy
;
1121 __reuse_or_alloc_node_gen_t
__roan(_M_begin(), *this);
1122 _M_before_begin
._M_nxt
= nullptr;
1123 _M_assign(std::forward
<_Ht
>(__ht
), __roan
);
1124 if (__former_buckets
)
1125 _M_deallocate_buckets(__former_buckets
, __former_bucket_count
);
1129 if (__former_buckets
)
1131 // Restore previous buckets.
1132 _M_deallocate_buckets();
1133 _M_rehash_policy
._M_reset(__former_state
);
1134 _M_buckets
= __former_buckets
;
1135 _M_bucket_count
= __former_bucket_count
;
1137 __builtin_memset(_M_buckets
, 0,
1138 _M_bucket_count
* sizeof(__bucket_type
));
1139 __throw_exception_again
;
1143 template<typename _Key
, typename _Value
,
1144 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1145 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1147 template<typename _Ht
, typename _NodeGenerator
>
1149 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1150 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1151 _M_assign(_Ht
&& __ht
, const _NodeGenerator
& __node_gen
)
1153 __bucket_type
* __buckets
= nullptr;
1155 _M_buckets
= __buckets
= _M_allocate_buckets(_M_bucket_count
);
1159 if (!__ht
._M_before_begin
._M_nxt
)
1162 // First deal with the special first node pointed to by
1164 __node_type
* __ht_n
= __ht
._M_begin();
1165 __node_type
* __this_n
1166 = __node_gen(__fwd_value_for
<_Ht
>(__ht_n
->_M_v()));
1167 this->_M_copy_code(__this_n
, __ht_n
);
1168 _M_before_begin
._M_nxt
= __this_n
;
1169 _M_buckets
[_M_bucket_index(__this_n
)] = &_M_before_begin
;
1171 // Then deal with other nodes.
1172 __node_base
* __prev_n
= __this_n
;
1173 for (__ht_n
= __ht_n
->_M_next(); __ht_n
; __ht_n
= __ht_n
->_M_next())
1175 __this_n
= __node_gen(__fwd_value_for
<_Ht
>(__ht_n
->_M_v()));
1176 __prev_n
->_M_nxt
= __this_n
;
1177 this->_M_copy_code(__this_n
, __ht_n
);
1178 size_type __bkt
= _M_bucket_index(__this_n
);
1179 if (!_M_buckets
[__bkt
])
1180 _M_buckets
[__bkt
] = __prev_n
;
1181 __prev_n
= __this_n
;
1188 _M_deallocate_buckets();
1189 __throw_exception_again
;
1193 template<typename _Key
, typename _Value
,
1194 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1195 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1198 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1199 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1202 _M_rehash_policy
._M_reset();
1203 _M_bucket_count
= 1;
1204 _M_single_bucket
= nullptr;
1205 _M_buckets
= &_M_single_bucket
;
1206 _M_before_begin
._M_nxt
= nullptr;
1207 _M_element_count
= 0;
1210 template<typename _Key
, typename _Value
,
1211 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1212 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1215 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1216 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1217 _M_move_assign(_Hashtable
&& __ht
, true_type
)
1219 this->_M_deallocate_nodes(_M_begin());
1220 _M_deallocate_buckets();
1221 __hashtable_base::operator=(std::move(__ht
));
1222 _M_rehash_policy
= __ht
._M_rehash_policy
;
1223 if (!__ht
._M_uses_single_bucket())
1224 _M_buckets
= __ht
._M_buckets
;
1227 _M_buckets
= &_M_single_bucket
;
1228 _M_single_bucket
= __ht
._M_single_bucket
;
1230 _M_bucket_count
= __ht
._M_bucket_count
;
1231 _M_before_begin
._M_nxt
= __ht
._M_before_begin
._M_nxt
;
1232 _M_element_count
= __ht
._M_element_count
;
1233 std::__alloc_on_move(this->_M_node_allocator(), __ht
._M_node_allocator());
1235 // Fix buckets containing the _M_before_begin pointers that can't be
1238 _M_buckets
[_M_bucket_index(_M_begin())] = &_M_before_begin
;
1242 template<typename _Key
, typename _Value
,
1243 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1244 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1247 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1248 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1249 _M_move_assign(_Hashtable
&& __ht
, false_type
)
1251 if (__ht
._M_node_allocator() == this->_M_node_allocator())
1252 _M_move_assign(std::move(__ht
), true_type());
1255 // Can't move memory, move elements then.
1256 _M_assign_elements(std::move(__ht
));
1261 template<typename _Key
, typename _Value
,
1262 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1263 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1265 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1266 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1267 _Hashtable(const _Hashtable
& __ht
)
1268 : __hashtable_base(__ht
),
1270 __rehash_base(__ht
),
1272 __node_alloc_traits::_S_select_on_copy(__ht
._M_node_allocator())),
1273 _M_buckets(nullptr),
1274 _M_bucket_count(__ht
._M_bucket_count
),
1275 _M_element_count(__ht
._M_element_count
),
1276 _M_rehash_policy(__ht
._M_rehash_policy
)
1278 __alloc_node_gen_t
__alloc_node_gen(*this);
1279 _M_assign(__ht
, __alloc_node_gen
);
1282 template<typename _Key
, typename _Value
,
1283 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1284 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1286 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1287 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1288 _Hashtable(_Hashtable
&& __ht
) noexcept
1289 : __hashtable_base(__ht
),
1291 __rehash_base(__ht
),
1292 __hashtable_alloc(std::move(__ht
._M_base_alloc())),
1293 _M_buckets(__ht
._M_buckets
),
1294 _M_bucket_count(__ht
._M_bucket_count
),
1295 _M_before_begin(__ht
._M_before_begin
._M_nxt
),
1296 _M_element_count(__ht
._M_element_count
),
1297 _M_rehash_policy(__ht
._M_rehash_policy
)
1299 // Update, if necessary, buckets if __ht is using its single bucket.
1300 if (__ht
._M_uses_single_bucket())
1302 _M_buckets
= &_M_single_bucket
;
1303 _M_single_bucket
= __ht
._M_single_bucket
;
1306 // Update, if necessary, bucket pointing to before begin that hasn't
1309 _M_buckets
[_M_bucket_index(_M_begin())] = &_M_before_begin
;
1314 template<typename _Key
, typename _Value
,
1315 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1316 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1318 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1319 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1320 _Hashtable(const _Hashtable
& __ht
, const allocator_type
& __a
)
1321 : __hashtable_base(__ht
),
1323 __rehash_base(__ht
),
1324 __hashtable_alloc(__node_alloc_type(__a
)),
1326 _M_bucket_count(__ht
._M_bucket_count
),
1327 _M_element_count(__ht
._M_element_count
),
1328 _M_rehash_policy(__ht
._M_rehash_policy
)
1330 __alloc_node_gen_t
__alloc_node_gen(*this);
1331 _M_assign(__ht
, __alloc_node_gen
);
1334 template<typename _Key
, typename _Value
,
1335 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1336 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1338 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1339 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1340 _Hashtable(_Hashtable
&& __ht
, const allocator_type
& __a
)
1341 : __hashtable_base(__ht
),
1343 __rehash_base(__ht
),
1344 __hashtable_alloc(__node_alloc_type(__a
)),
1345 _M_buckets(nullptr),
1346 _M_bucket_count(__ht
._M_bucket_count
),
1347 _M_element_count(__ht
._M_element_count
),
1348 _M_rehash_policy(__ht
._M_rehash_policy
)
1350 if (__ht
._M_node_allocator() == this->_M_node_allocator())
1352 if (__ht
._M_uses_single_bucket())
1354 _M_buckets
= &_M_single_bucket
;
1355 _M_single_bucket
= __ht
._M_single_bucket
;
1358 _M_buckets
= __ht
._M_buckets
;
1360 _M_before_begin
._M_nxt
= __ht
._M_before_begin
._M_nxt
;
1361 // Update, if necessary, bucket pointing to before begin that hasn't
1364 _M_buckets
[_M_bucket_index(_M_begin())] = &_M_before_begin
;
1369 __alloc_node_gen_t
__alloc_gen(*this);
1371 using _Fwd_Ht
= typename
1372 conditional
<__move_if_noexcept_cond
<value_type
>::value
,
1373 const _Hashtable
&, _Hashtable
&&>::type
;
1374 _M_assign(std::forward
<_Fwd_Ht
>(__ht
), __alloc_gen
);
1379 template<typename _Key
, typename _Value
,
1380 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1381 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1383 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1384 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1385 ~_Hashtable() noexcept
1388 _M_deallocate_buckets();
1391 template<typename _Key
, typename _Value
,
1392 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1393 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1396 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1397 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1398 swap(_Hashtable
& __x
)
1399 noexcept(__and_
<__is_nothrow_swappable
<_H1
>,
1400 __is_nothrow_swappable
<_Equal
>>::value
)
1402 // The only base class with member variables is hash_code_base.
1403 // We define _Hash_code_base::_M_swap because different
1404 // specializations have different members.
1407 std::__alloc_on_swap(this->_M_node_allocator(), __x
._M_node_allocator());
1408 std::swap(_M_rehash_policy
, __x
._M_rehash_policy
);
1410 // Deal properly with potentially moved instances.
1411 if (this->_M_uses_single_bucket())
1413 if (!__x
._M_uses_single_bucket())
1415 _M_buckets
= __x
._M_buckets
;
1416 __x
._M_buckets
= &__x
._M_single_bucket
;
1419 else if (__x
._M_uses_single_bucket())
1421 __x
._M_buckets
= _M_buckets
;
1422 _M_buckets
= &_M_single_bucket
;
1425 std::swap(_M_buckets
, __x
._M_buckets
);
1427 std::swap(_M_bucket_count
, __x
._M_bucket_count
);
1428 std::swap(_M_before_begin
._M_nxt
, __x
._M_before_begin
._M_nxt
);
1429 std::swap(_M_element_count
, __x
._M_element_count
);
1430 std::swap(_M_single_bucket
, __x
._M_single_bucket
);
1432 // Fix buckets containing the _M_before_begin pointers that can't be
1435 _M_buckets
[_M_bucket_index(_M_begin())] = &_M_before_begin
;
1438 __x
._M_buckets
[__x
._M_bucket_index(__x
._M_begin())]
1439 = &__x
._M_before_begin
;
1442 template<typename _Key
, typename _Value
,
1443 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1444 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1447 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1448 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1449 find(const key_type
& __k
)
1452 __hash_code __code
= this->_M_hash_code(__k
);
1453 std::size_t __bkt
= _M_bucket_index(__k
, __code
);
1454 __node_type
* __p
= _M_find_node(__bkt
, __k
, __code
);
1455 return __p
? iterator(__p
) : end();
1458 template<typename _Key
, typename _Value
,
1459 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1460 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1463 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1464 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1465 find(const key_type
& __k
) const
1468 __hash_code __code
= this->_M_hash_code(__k
);
1469 std::size_t __bkt
= _M_bucket_index(__k
, __code
);
1470 __node_type
* __p
= _M_find_node(__bkt
, __k
, __code
);
1471 return __p
? const_iterator(__p
) : end();
1474 template<typename _Key
, typename _Value
,
1475 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1476 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1479 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1480 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1481 count(const key_type
& __k
) const
1484 __hash_code __code
= this->_M_hash_code(__k
);
1485 std::size_t __bkt
= _M_bucket_index(__k
, __code
);
1486 __node_type
* __p
= _M_bucket_begin(__bkt
);
1490 std::size_t __result
= 0;
1491 for (;; __p
= __p
->_M_next())
1493 if (this->_M_equals(__k
, __code
, __p
))
1496 // All equivalent values are next to each other, if we
1497 // found a non-equivalent value after an equivalent one it
1498 // means that we won't find any new equivalent value.
1500 if (!__p
->_M_nxt
|| _M_bucket_index(__p
->_M_next()) != __bkt
)
1506 template<typename _Key
, typename _Value
,
1507 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1508 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1511 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1512 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1513 equal_range(const key_type
& __k
)
1514 -> pair
<iterator
, iterator
>
1516 __hash_code __code
= this->_M_hash_code(__k
);
1517 std::size_t __bkt
= _M_bucket_index(__k
, __code
);
1518 __node_type
* __p
= _M_find_node(__bkt
, __k
, __code
);
1522 __node_type
* __p1
= __p
->_M_next();
1523 while (__p1
&& _M_bucket_index(__p1
) == __bkt
1524 && this->_M_equals(__k
, __code
, __p1
))
1525 __p1
= __p1
->_M_next();
1527 return std::make_pair(iterator(__p
), iterator(__p1
));
1530 return std::make_pair(end(), end());
1533 template<typename _Key
, typename _Value
,
1534 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1535 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1538 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1539 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1540 equal_range(const key_type
& __k
) const
1541 -> pair
<const_iterator
, const_iterator
>
1543 __hash_code __code
= this->_M_hash_code(__k
);
1544 std::size_t __bkt
= _M_bucket_index(__k
, __code
);
1545 __node_type
* __p
= _M_find_node(__bkt
, __k
, __code
);
1549 __node_type
* __p1
= __p
->_M_next();
1550 while (__p1
&& _M_bucket_index(__p1
) == __bkt
1551 && this->_M_equals(__k
, __code
, __p1
))
1552 __p1
= __p1
->_M_next();
1554 return std::make_pair(const_iterator(__p
), const_iterator(__p1
));
1557 return std::make_pair(end(), end());
1560 // Find the node whose key compares equal to k in the bucket bkt.
1561 // Return nullptr if no node is found.
1562 template<typename _Key
, typename _Value
,
1563 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1564 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1567 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1568 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1569 _M_find_before_node(size_type __bkt
, const key_type
& __k
,
1570 __hash_code __code
) const
1573 __node_base
* __prev_p
= _M_buckets
[__bkt
];
1577 for (__node_type
* __p
= static_cast<__node_type
*>(__prev_p
->_M_nxt
);;
1578 __p
= __p
->_M_next())
1580 if (this->_M_equals(__k
, __code
, __p
))
1583 if (!__p
->_M_nxt
|| _M_bucket_index(__p
->_M_next()) != __bkt
)
1590 template<typename _Key
, typename _Value
,
1591 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1592 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1595 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1596 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1597 _M_insert_bucket_begin(size_type __bkt
, __node_type
* __node
)
1599 if (_M_buckets
[__bkt
])
1601 // Bucket is not empty, we just need to insert the new node
1602 // after the bucket before begin.
1603 __node
->_M_nxt
= _M_buckets
[__bkt
]->_M_nxt
;
1604 _M_buckets
[__bkt
]->_M_nxt
= __node
;
1608 // The bucket is empty, the new node is inserted at the
1609 // beginning of the singly-linked list and the bucket will
1610 // contain _M_before_begin pointer.
1611 __node
->_M_nxt
= _M_before_begin
._M_nxt
;
1612 _M_before_begin
._M_nxt
= __node
;
1614 // We must update former begin bucket that is pointing to
1616 _M_buckets
[_M_bucket_index(__node
->_M_next())] = __node
;
1617 _M_buckets
[__bkt
] = &_M_before_begin
;
1621 template<typename _Key
, typename _Value
,
1622 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1623 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1626 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1627 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1628 _M_remove_bucket_begin(size_type __bkt
, __node_type
* __next
,
1629 size_type __next_bkt
)
1631 if (!__next
|| __next_bkt
!= __bkt
)
1633 // Bucket is now empty
1634 // First update next bucket if any
1636 _M_buckets
[__next_bkt
] = _M_buckets
[__bkt
];
1638 // Second update before begin node if necessary
1639 if (&_M_before_begin
== _M_buckets
[__bkt
])
1640 _M_before_begin
._M_nxt
= __next
;
1641 _M_buckets
[__bkt
] = nullptr;
1645 template<typename _Key
, typename _Value
,
1646 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1647 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1650 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1651 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1652 _M_get_previous_node(size_type __bkt
, __node_base
* __n
)
1655 __node_base
* __prev_n
= _M_buckets
[__bkt
];
1656 while (__prev_n
->_M_nxt
!= __n
)
1657 __prev_n
= __prev_n
->_M_nxt
;
1661 template<typename _Key
, typename _Value
,
1662 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1663 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1665 template<typename
... _Args
>
1667 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1668 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1669 _M_emplace(true_type
, _Args
&&... __args
)
1670 -> pair
<iterator
, bool>
1672 // First build the node to get access to the hash code
1673 _Scoped_node __node
{ this, std::forward
<_Args
>(__args
)... };
1674 const key_type
& __k
= this->_M_extract()(__node
._M_node
->_M_v());
1675 __hash_code __code
= this->_M_hash_code(__k
);
1676 size_type __bkt
= _M_bucket_index(__k
, __code
);
1677 if (__node_type
* __p
= _M_find_node(__bkt
, __k
, __code
))
1678 // There is already an equivalent node, no insertion
1679 return std::make_pair(iterator(__p
), false);
1682 auto __pos
= _M_insert_unique_node(__k
, __bkt
, __code
, __node
._M_node
);
1683 __node
._M_node
= nullptr;
1684 return { __pos
, true };
1687 template<typename _Key
, typename _Value
,
1688 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1689 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1691 template<typename
... _Args
>
1693 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1694 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1695 _M_emplace(const_iterator __hint
, false_type
, _Args
&&... __args
)
1698 // First build the node to get its hash code.
1699 _Scoped_node __node
{ this, std::forward
<_Args
>(__args
)... };
1700 const key_type
& __k
= this->_M_extract()(__node
._M_node
->_M_v());
1702 __hash_code __code
= this->_M_hash_code(__k
);
1704 = _M_insert_multi_node(__hint
._M_cur
, __k
, __code
, __node
._M_node
);
1705 __node
._M_node
= nullptr;
1709 template<typename _Key
, typename _Value
,
1710 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1711 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1714 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1715 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1716 _M_insert_unique_node(const key_type
& __k
, size_type __bkt
,
1717 __hash_code __code
, __node_type
* __node
,
1721 const __rehash_state
& __saved_state
= _M_rehash_policy
._M_state();
1722 std::pair
<bool, std::size_t> __do_rehash
1723 = _M_rehash_policy
._M_need_rehash(_M_bucket_count
, _M_element_count
,
1726 if (__do_rehash
.first
)
1728 _M_rehash(__do_rehash
.second
, __saved_state
);
1729 __bkt
= _M_bucket_index(__k
, __code
);
1732 this->_M_store_code(__node
, __code
);
1734 // Always insert at the beginning of the bucket.
1735 _M_insert_bucket_begin(__bkt
, __node
);
1737 return iterator(__node
);
1740 template<typename _Key
, typename _Value
,
1741 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1742 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1745 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1746 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1747 _M_insert_multi_node(__node_type
* __hint
, const key_type
& __k
,
1748 __hash_code __code
, __node_type
* __node
)
1751 const __rehash_state
& __saved_state
= _M_rehash_policy
._M_state();
1752 std::pair
<bool, std::size_t> __do_rehash
1753 = _M_rehash_policy
._M_need_rehash(_M_bucket_count
, _M_element_count
, 1);
1755 if (__do_rehash
.first
)
1756 _M_rehash(__do_rehash
.second
, __saved_state
);
1758 this->_M_store_code(__node
, __code
);
1759 size_type __bkt
= _M_bucket_index(__k
, __code
);
1761 // Find the node before an equivalent one or use hint if it exists and
1762 // if it is equivalent.
1764 = __builtin_expect(__hint
!= nullptr, false)
1765 && this->_M_equals(__k
, __code
, __hint
)
1767 : _M_find_before_node(__bkt
, __k
, __code
);
1770 // Insert after the node before the equivalent one.
1771 __node
->_M_nxt
= __prev
->_M_nxt
;
1772 __prev
->_M_nxt
= __node
;
1773 if (__builtin_expect(__prev
== __hint
, false))
1774 // hint might be the last bucket node, in this case we need to
1775 // update next bucket.
1777 && !this->_M_equals(__k
, __code
, __node
->_M_next()))
1779 size_type __next_bkt
= _M_bucket_index(__node
->_M_next());
1780 if (__next_bkt
!= __bkt
)
1781 _M_buckets
[__next_bkt
] = __node
;
1785 // The inserted node has no equivalent in the hashtable. We must
1786 // insert the new node at the beginning of the bucket to preserve
1787 // equivalent elements' relative positions.
1788 _M_insert_bucket_begin(__bkt
, __node
);
1790 return iterator(__node
);
1793 // Insert v if no element with its key is already present.
1794 template<typename _Key
, typename _Value
,
1795 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1796 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1798 template<typename _Arg
, typename _NodeGenerator
>
1800 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1801 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1802 _M_insert(_Arg
&& __v
, const _NodeGenerator
& __node_gen
, true_type
,
1804 -> pair
<iterator
, bool>
1806 const key_type
& __k
= this->_M_extract()(__v
);
1807 __hash_code __code
= this->_M_hash_code(__k
);
1808 size_type __bkt
= _M_bucket_index(__k
, __code
);
1810 if (__node_type
* __node
= _M_find_node(__bkt
, __k
, __code
))
1811 return { iterator(__node
), false };
1813 _Scoped_node __node
{ __node_gen(std::forward
<_Arg
>(__v
)), this };
1815 = _M_insert_unique_node(__k
, __bkt
, __code
, __node
._M_node
, __n_elt
);
1816 __node
._M_node
= nullptr;
1817 return { __pos
, true };
1820 // Insert v unconditionally.
1821 template<typename _Key
, typename _Value
,
1822 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1823 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1825 template<typename _Arg
, typename _NodeGenerator
>
1827 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1828 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1829 _M_insert(const_iterator __hint
, _Arg
&& __v
,
1830 const _NodeGenerator
& __node_gen
, false_type
)
1833 // First compute the hash code so that we don't do anything if it
1835 __hash_code __code
= this->_M_hash_code(this->_M_extract()(__v
));
1837 // Second allocate new node so that we don't rehash if it throws.
1838 _Scoped_node __node
{ __node_gen(std::forward
<_Arg
>(__v
)), this };
1839 const key_type
& __k
= this->_M_extract()(__node
._M_node
->_M_v());
1841 = _M_insert_multi_node(__hint
._M_cur
, __k
, __code
, __node
._M_node
);
1842 __node
._M_node
= nullptr;
1846 template<typename _Key
, typename _Value
,
1847 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1848 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1851 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1852 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1853 erase(const_iterator __it
)
1856 __node_type
* __n
= __it
._M_cur
;
1857 std::size_t __bkt
= _M_bucket_index(__n
);
1859 // Look for previous node to unlink it from the erased one, this
1860 // is why we need buckets to contain the before begin to make
1861 // this search fast.
1862 __node_base
* __prev_n
= _M_get_previous_node(__bkt
, __n
);
1863 return _M_erase(__bkt
, __prev_n
, __n
);
1866 template<typename _Key
, typename _Value
,
1867 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1868 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1871 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1872 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1873 _M_erase(size_type __bkt
, __node_base
* __prev_n
, __node_type
* __n
)
1876 if (__prev_n
== _M_buckets
[__bkt
])
1877 _M_remove_bucket_begin(__bkt
, __n
->_M_next(),
1878 __n
->_M_nxt
? _M_bucket_index(__n
->_M_next()) : 0);
1879 else if (__n
->_M_nxt
)
1881 size_type __next_bkt
= _M_bucket_index(__n
->_M_next());
1882 if (__next_bkt
!= __bkt
)
1883 _M_buckets
[__next_bkt
] = __prev_n
;
1886 __prev_n
->_M_nxt
= __n
->_M_nxt
;
1887 iterator
__result(__n
->_M_next());
1888 this->_M_deallocate_node(__n
);
1894 template<typename _Key
, typename _Value
,
1895 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1896 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1899 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1900 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1901 _M_erase(true_type
, const key_type
& __k
)
1904 __hash_code __code
= this->_M_hash_code(__k
);
1905 std::size_t __bkt
= _M_bucket_index(__k
, __code
);
1907 // Look for the node before the first matching node.
1908 __node_base
* __prev_n
= _M_find_before_node(__bkt
, __k
, __code
);
1912 // We found a matching node, erase it.
1913 __node_type
* __n
= static_cast<__node_type
*>(__prev_n
->_M_nxt
);
1914 _M_erase(__bkt
, __prev_n
, __n
);
1918 template<typename _Key
, typename _Value
,
1919 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1920 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1923 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1924 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1925 _M_erase(false_type
, const key_type
& __k
)
1928 __hash_code __code
= this->_M_hash_code(__k
);
1929 std::size_t __bkt
= _M_bucket_index(__k
, __code
);
1931 // Look for the node before the first matching node.
1932 __node_base
* __prev_n
= _M_find_before_node(__bkt
, __k
, __code
);
1936 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1937 // 526. Is it undefined if a function in the standard changes
1939 // We use one loop to find all matching nodes and another to deallocate
1940 // them so that the key stays valid during the first loop. It might be
1941 // invalidated indirectly when destroying nodes.
1942 __node_type
* __n
= static_cast<__node_type
*>(__prev_n
->_M_nxt
);
1943 __node_type
* __n_last
= __n
;
1944 std::size_t __n_last_bkt
= __bkt
;
1947 __n_last
= __n_last
->_M_next();
1950 __n_last_bkt
= _M_bucket_index(__n_last
);
1952 while (__n_last_bkt
== __bkt
&& this->_M_equals(__k
, __code
, __n_last
));
1954 // Deallocate nodes.
1955 size_type __result
= 0;
1958 __node_type
* __p
= __n
->_M_next();
1959 this->_M_deallocate_node(__n
);
1964 while (__n
!= __n_last
);
1966 if (__prev_n
== _M_buckets
[__bkt
])
1967 _M_remove_bucket_begin(__bkt
, __n_last
, __n_last_bkt
);
1968 else if (__n_last
&& __n_last_bkt
!= __bkt
)
1969 _M_buckets
[__n_last_bkt
] = __prev_n
;
1970 __prev_n
->_M_nxt
= __n_last
;
1974 template<typename _Key
, typename _Value
,
1975 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
1976 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
1979 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1980 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
1981 erase(const_iterator __first
, const_iterator __last
)
1984 __node_type
* __n
= __first
._M_cur
;
1985 __node_type
* __last_n
= __last
._M_cur
;
1986 if (__n
== __last_n
)
1987 return iterator(__n
);
1989 std::size_t __bkt
= _M_bucket_index(__n
);
1991 __node_base
* __prev_n
= _M_get_previous_node(__bkt
, __n
);
1992 bool __is_bucket_begin
= __n
== _M_bucket_begin(__bkt
);
1993 std::size_t __n_bkt
= __bkt
;
1998 __node_type
* __tmp
= __n
;
1999 __n
= __n
->_M_next();
2000 this->_M_deallocate_node(__tmp
);
2004 __n_bkt
= _M_bucket_index(__n
);
2006 while (__n
!= __last_n
&& __n_bkt
== __bkt
);
2007 if (__is_bucket_begin
)
2008 _M_remove_bucket_begin(__bkt
, __n
, __n_bkt
);
2009 if (__n
== __last_n
)
2011 __is_bucket_begin
= true;
2015 if (__n
&& (__n_bkt
!= __bkt
|| __is_bucket_begin
))
2016 _M_buckets
[__n_bkt
] = __prev_n
;
2017 __prev_n
->_M_nxt
= __n
;
2018 return iterator(__n
);
2021 template<typename _Key
, typename _Value
,
2022 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
2023 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
2026 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2027 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
2030 this->_M_deallocate_nodes(_M_begin());
2031 __builtin_memset(_M_buckets
, 0, _M_bucket_count
* sizeof(__bucket_type
));
2032 _M_element_count
= 0;
2033 _M_before_begin
._M_nxt
= nullptr;
2036 template<typename _Key
, typename _Value
,
2037 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
2038 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
2041 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2042 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
2043 rehash(size_type __bkt_count
)
2045 const __rehash_state
& __saved_state
= _M_rehash_policy
._M_state();
2047 = std::max(_M_rehash_policy
._M_bkt_for_elements(_M_element_count
+ 1),
2049 __bkt_count
= _M_rehash_policy
._M_next_bkt(__bkt_count
);
2051 if (__bkt_count
!= _M_bucket_count
)
2052 _M_rehash(__bkt_count
, __saved_state
);
2054 // No rehash, restore previous state to keep it consistent with
2056 _M_rehash_policy
._M_reset(__saved_state
);
2059 template<typename _Key
, typename _Value
,
2060 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
2061 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
2064 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2065 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
2066 _M_rehash(size_type __bkt_count
, const __rehash_state
& __state
)
2070 _M_rehash_aux(__bkt_count
, __unique_keys());
2074 // A failure here means that buckets allocation failed. We only
2075 // have to restore hash policy previous state.
2076 _M_rehash_policy
._M_reset(__state
);
2077 __throw_exception_again
;
2081 // Rehash when there is no equivalent elements.
2082 template<typename _Key
, typename _Value
,
2083 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
2084 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
2087 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2088 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
2089 _M_rehash_aux(size_type __bkt_count
, true_type
)
2091 __bucket_type
* __new_buckets
= _M_allocate_buckets(__bkt_count
);
2092 __node_type
* __p
= _M_begin();
2093 _M_before_begin
._M_nxt
= nullptr;
2094 std::size_t __bbegin_bkt
= 0;
2097 __node_type
* __next
= __p
->_M_next();
2099 = __hash_code_base::_M_bucket_index(__p
, __bkt_count
);
2100 if (!__new_buckets
[__bkt
])
2102 __p
->_M_nxt
= _M_before_begin
._M_nxt
;
2103 _M_before_begin
._M_nxt
= __p
;
2104 __new_buckets
[__bkt
] = &_M_before_begin
;
2106 __new_buckets
[__bbegin_bkt
] = __p
;
2107 __bbegin_bkt
= __bkt
;
2111 __p
->_M_nxt
= __new_buckets
[__bkt
]->_M_nxt
;
2112 __new_buckets
[__bkt
]->_M_nxt
= __p
;
2117 _M_deallocate_buckets();
2118 _M_bucket_count
= __bkt_count
;
2119 _M_buckets
= __new_buckets
;
2122 // Rehash when there can be equivalent elements, preserve their relative
2124 template<typename _Key
, typename _Value
,
2125 typename _Alloc
, typename _ExtractKey
, typename _Equal
,
2126 typename _H1
, typename _H2
, typename _Hash
, typename _RehashPolicy
,
2129 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2130 _H1
, _H2
, _Hash
, _RehashPolicy
, _Traits
>::
2131 _M_rehash_aux(size_type __bkt_count
, false_type
)
2133 __bucket_type
* __new_buckets
= _M_allocate_buckets(__bkt_count
);
2135 __node_type
* __p
= _M_begin();
2136 _M_before_begin
._M_nxt
= nullptr;
2137 std::size_t __bbegin_bkt
= 0;
2138 std::size_t __prev_bkt
= 0;
2139 __node_type
* __prev_p
= nullptr;
2140 bool __check_bucket
= false;
2144 __node_type
* __next
= __p
->_M_next();
2146 = __hash_code_base::_M_bucket_index(__p
, __bkt_count
);
2148 if (__prev_p
&& __prev_bkt
== __bkt
)
2150 // Previous insert was already in this bucket, we insert after
2151 // the previously inserted one to preserve equivalent elements
2153 __p
->_M_nxt
= __prev_p
->_M_nxt
;
2154 __prev_p
->_M_nxt
= __p
;
2156 // Inserting after a node in a bucket require to check that we
2157 // haven't change the bucket last node, in this case next
2158 // bucket containing its before begin node must be updated. We
2159 // schedule a check as soon as we move out of the sequence of
2160 // equivalent nodes to limit the number of checks.
2161 __check_bucket
= true;
2167 // Check if we shall update the next bucket because of
2168 // insertions into __prev_bkt bucket.
2169 if (__prev_p
->_M_nxt
)
2171 std::size_t __next_bkt
2172 = __hash_code_base::_M_bucket_index(__prev_p
->_M_next(),
2174 if (__next_bkt
!= __prev_bkt
)
2175 __new_buckets
[__next_bkt
] = __prev_p
;
2177 __check_bucket
= false;
2180 if (!__new_buckets
[__bkt
])
2182 __p
->_M_nxt
= _M_before_begin
._M_nxt
;
2183 _M_before_begin
._M_nxt
= __p
;
2184 __new_buckets
[__bkt
] = &_M_before_begin
;
2186 __new_buckets
[__bbegin_bkt
] = __p
;
2187 __bbegin_bkt
= __bkt
;
2191 __p
->_M_nxt
= __new_buckets
[__bkt
]->_M_nxt
;
2192 __new_buckets
[__bkt
]->_M_nxt
= __p
;
2200 if (__check_bucket
&& __prev_p
->_M_nxt
)
2202 std::size_t __next_bkt
2203 = __hash_code_base::_M_bucket_index(__prev_p
->_M_next(),
2205 if (__next_bkt
!= __prev_bkt
)
2206 __new_buckets
[__next_bkt
] = __prev_p
;
2209 _M_deallocate_buckets();
2210 _M_bucket_count
= __bkt_count
;
2211 _M_buckets
= __new_buckets
;
#if __cplusplus > 201402L
  // Primary template, specialized by the unordered containers to grant
  // merge() access to another container's hashtable internals.
  template<typename, typename, typename> class _Hash_merge_helper { };
2218 #if __cpp_deduction_guides >= 201606
2219 // Used to constrain deduction guides
2220 template<typename _Hash
>
2221 using _RequireNotAllocatorOrIntegral
2222 = __enable_if_t
<!__or_
<is_integral
<_Hash
>, __is_allocator
<_Hash
>>::value
>;
2225 _GLIBCXX_END_NAMESPACE_VERSION
2228 #endif // _HASHTABLE_H