]> git.ipfire.org Git - thirdparty/gcc.git/blob - libstdc++-v3/include/bits/hashtable.h
Update copyright years.
[thirdparty/gcc.git] / libstdc++-v3 / include / bits / hashtable.h
1 // hashtable.h header -*- C++ -*-
2
3 // Copyright (C) 2007-2024 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24
25 /** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28 */
29
30 #ifndef _HASHTABLE_H
31 #define _HASHTABLE_H 1
32
33 #pragma GCC system_header
34
35 #include <bits/hashtable_policy.h>
36 #include <bits/enable_special_members.h>
37 #include <bits/stl_function.h> // __has_is_transparent_t
38 #if __cplusplus > 201402L
39 # include <bits/node_handle.h>
40 #endif
41
42 namespace std _GLIBCXX_VISIBILITY(default)
43 {
44 _GLIBCXX_BEGIN_NAMESPACE_VERSION
45 /// @cond undocumented
46
  // Default policy for caching the hash code inside each node: cache
  // unless the hasher is both fast to invoke and nothrow-invocable.
  // (A nothrow hasher is mandatory when not caching, since erase must be
  // able to recompute hash codes without throwing.)
  template<typename _Tp, typename _Hash>
    using __cache_default
      =  __not_<__and_<// Do not cache for fast hasher.
		       __is_fast_hash<_Hash>,
		       // Mandatory to have erase not throwing.
		       __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
53
  // Helper to conditionally delete the default constructor: the hashtable
  // is default-constructible only if _Equal, _Hash and _Allocator all are.
  // The _Hash_node_base type is used to distinguish this specialization
  // from any other potentially-overlapping subobjects of the hashtable.
  template<typename _Equal, typename _Hash, typename _Allocator>
    using _Hashtable_enable_default_ctor
      = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
				       is_default_constructible<_Hash>,
				       is_default_constructible<_Allocator>>{},
				    __detail::_Hash_node_base>;
63
64 /**
65 * Primary class template _Hashtable.
66 *
67 * @ingroup hashtable-detail
68 *
69 * @tparam _Value CopyConstructible type.
70 *
71 * @tparam _Key CopyConstructible type.
72 *
73 * @tparam _Alloc An allocator type
74 * ([lib.allocator.requirements]) whose _Alloc::value_type is
75 * _Value. As a conforming extension, we allow for
76 * _Alloc::value_type != _Value.
77 *
78 * @tparam _ExtractKey Function object that takes an object of type
79 * _Value and returns a value of type _Key.
80 *
81  *  @tparam _Equal  Function object that takes two objects of type _Key
82 * and returns a bool-like value that is true if the two objects
83 * are considered equal.
84 *
85 * @tparam _Hash The hash function. A unary function object with
86 * argument type _Key and result type size_t. Return values should
87  *  be distributed over the entire range [0, numeric_limits<size_t>::max()].
88 *
89 * @tparam _RangeHash The range-hashing function (in the terminology of
90 * Tavori and Dreizin). A binary function object whose argument
91 * types and result type are all size_t. Given arguments r and N,
92 * the return value is in the range [0, N).
93 *
94 * @tparam _Unused Not used.
95 *
96 * @tparam _RehashPolicy Policy class with three members, all of
97 * which govern the bucket count. _M_next_bkt(n) returns a bucket
98 * count no smaller than n. _M_bkt_for_elements(n) returns a
99 * bucket count appropriate for an element count of n.
100 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
101 * current bucket count is n_bkt and the current element count is
102 * n_elt, we need to increase the bucket count for n_ins insertions.
103 * If so, returns make_pair(true, n), where n is the new bucket count. If
104 * not, returns make_pair(false, <anything>)
105 *
106 * @tparam _Traits Compile-time class with three boolean
107 * std::integral_constant members: __cache_hash_code, __constant_iterators,
108 * __unique_keys.
109 *
110 * Each _Hashtable data structure has:
111 *
112 * - _Bucket[] _M_buckets
113 * - _Hash_node_base _M_before_begin
114 * - size_type _M_bucket_count
115 * - size_type _M_element_count
116 *
117 * with _Bucket being _Hash_node_base* and _Hash_node containing:
118 *
119 * - _Hash_node* _M_next
120 * - Tp _M_value
121 * - size_t _M_hash_code if cache_hash_code is true
122 *
123 * In terms of Standard containers the hashtable is like the aggregation of:
124 *
125 * - std::forward_list<_Node> containing the elements
126 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
127 *
128 * The non-empty buckets contain the node before the first node in the
129 * bucket. This design makes it possible to implement something like a
130 * std::forward_list::insert_after on container insertion and
131 * std::forward_list::erase_after on container erase
132 * calls. _M_before_begin is equivalent to
133 * std::forward_list::before_begin. Empty buckets contain
134 * nullptr. Note that one of the non-empty buckets contains
135 * &_M_before_begin which is not a dereferenceable node so the
136 * node pointer in a bucket shall never be dereferenced, only its
137 * next node can be.
138 *
139 * Walking through a bucket's nodes requires a check on the hash code to
140 * see if each node is still in the bucket. Such a design assumes a
141 * quite efficient hash functor and is one of the reasons it is
142 * highly advisable to set __cache_hash_code to true.
143 *
144 * The container iterators are simply built from nodes. This way
145 * incrementing the iterator is perfectly efficient independent of
146 * how many empty buckets there are in the container.
147 *
148 * On insert we compute the element's hash code and use it to find the
149 * bucket index. If the element must be inserted in an empty bucket
150 * we add it at the beginning of the singly linked list and make the
151 * bucket point to _M_before_begin. The bucket that used to point to
152 * _M_before_begin, if any, is updated to point to its new before
153 * begin node.
154 *
155 * Note that all equivalent values, if any, are next to each other, if
156 * we find a non-equivalent value after an equivalent one it means that
157 * we won't find any new equivalent value.
158 *
159 * On erase, the simple iterator design requires using the hash
160 * functor to get the index of the bucket to update. For this
161 * reason, when __cache_hash_code is set to false the hash functor must
162 * not throw and this is enforced by a static assertion.
163 *
164 * Functionality is implemented by decomposition into base classes,
165 * where the derived _Hashtable class is used in _Map_base,
166 * _Insert, _Rehash_base, and _Equality base classes to access the
167 * "this" pointer. _Hashtable_base is used in the base classes as a
168 * non-recursive, fully-completed-type so that detailed nested type
169 * information, such as iterator type and node type, can be
170 * used. This is similar to the "Curiously Recurring Template
171 * Pattern" (CRTP) technique, but uses a reconstructed, not
172 * explicitly passed, template pattern.
173 *
174 * Base class templates are:
175 * - __detail::_Hashtable_base
176 * - __detail::_Map_base
177 * - __detail::_Insert
178 * - __detail::_Rehash_base
179 * - __detail::_Equality
180 */
181 template<typename _Key, typename _Value, typename _Alloc,
182 typename _ExtractKey, typename _Equal,
183 typename _Hash, typename _RangeHash, typename _Unused,
184 typename _RehashPolicy, typename _Traits>
185 class _Hashtable
186 : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
187 _Hash, _RangeHash, _Unused, _Traits>,
188 public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
189 _Hash, _RangeHash, _Unused,
190 _RehashPolicy, _Traits>,
191 public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
192 _Hash, _RangeHash, _Unused,
193 _RehashPolicy, _Traits>,
194 public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
195 _Hash, _RangeHash, _Unused,
196 _RehashPolicy, _Traits>,
197 public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
198 _Hash, _RangeHash, _Unused,
199 _RehashPolicy, _Traits>,
200 private __detail::_Hashtable_alloc<
201 __alloc_rebind<_Alloc,
202 __detail::_Hash_node<_Value,
203 _Traits::__hash_cached::value>>>,
204 private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
205 {
206 static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
207 "unordered container must have a non-const, non-volatile value_type");
208 #if __cplusplus > 201703L || defined __STRICT_ANSI__
209 static_assert(is_same<typename _Alloc::value_type, _Value>{},
210 "unordered container must have the same value_type as its allocator");
211 #endif
212
213 using __traits_type = _Traits;
214 using __hash_cached = typename __traits_type::__hash_cached;
215 using __constant_iterators = typename __traits_type::__constant_iterators;
216 using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
217 using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
218
219 using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
220
221 using __node_value_type =
222 __detail::_Hash_node_value<_Value, __hash_cached::value>;
223 using __node_ptr = typename __hashtable_alloc::__node_ptr;
224 using __value_alloc_traits =
225 typename __hashtable_alloc::__value_alloc_traits;
226 using __node_alloc_traits =
227 typename __hashtable_alloc::__node_alloc_traits;
228 using __node_base = typename __hashtable_alloc::__node_base;
229 using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
230 using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
231
232 using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
233 _Equal, _Hash,
234 _RangeHash, _Unused,
235 _RehashPolicy, _Traits>;
236 using __enable_default_ctor
237 = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
238 using __rehash_guard_t
239 = __detail::_RehashStateGuard<_RehashPolicy>;
240
241 public:
242 typedef _Key key_type;
243 typedef _Value value_type;
244 typedef _Alloc allocator_type;
245 typedef _Equal key_equal;
246
247 // mapped_type, if present, comes from _Map_base.
248 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
249 typedef typename __value_alloc_traits::pointer pointer;
250 typedef typename __value_alloc_traits::const_pointer const_pointer;
251 typedef value_type& reference;
252 typedef const value_type& const_reference;
253
254 using iterator = typename __insert_base::iterator;
255
256 using const_iterator = typename __insert_base::const_iterator;
257
258 using local_iterator = __detail::_Local_iterator<key_type, _Value,
259 _ExtractKey, _Hash, _RangeHash, _Unused,
260 __constant_iterators::value,
261 __hash_cached::value>;
262
263 using const_local_iterator = __detail::_Local_const_iterator<
264 key_type, _Value,
265 _ExtractKey, _Hash, _RangeHash, _Unused,
266 __constant_iterators::value, __hash_cached::value>;
267
268 private:
269 using __rehash_type = _RehashPolicy;
270
271 using __unique_keys = typename __traits_type::__unique_keys;
272
273 using __hashtable_base = __detail::
274 _Hashtable_base<_Key, _Value, _ExtractKey,
275 _Equal, _Hash, _RangeHash, _Unused, _Traits>;
276
277 using __hash_code_base = typename __hashtable_base::__hash_code_base;
278 using __hash_code = typename __hashtable_base::__hash_code;
279 using __ireturn_type = typename __insert_base::__ireturn_type;
280
281 using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
282 _Equal, _Hash, _RangeHash, _Unused,
283 _RehashPolicy, _Traits>;
284
285 using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
286 _ExtractKey, _Equal,
287 _Hash, _RangeHash, _Unused,
288 _RehashPolicy, _Traits>;
289
290 using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
291 _Equal, _Hash, _RangeHash, _Unused,
292 _RehashPolicy, _Traits>;
293
294 using __reuse_or_alloc_node_gen_t =
295 __detail::_ReuseOrAllocNode<__node_alloc_type>;
296 using __alloc_node_gen_t =
297 __detail::_AllocNode<__node_alloc_type>;
298 using __node_builder_t =
299 __detail::_NodeBuilder<_ExtractKey>;
300
      // Simple RAII type for managing a node containing an element.
      // If the node is still owned when the guard is destroyed, the element
      // is destroyed and the node deallocated; callers clear _M_node to
      // transfer ownership to the container on successful insertion.
      struct _Scoped_node
      {
	// Take ownership of a node with a constructed element.
	_Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
	: _M_h(__h), _M_node(__n) { }

	// Allocate a node and construct an element within it.
	template<typename... _Args>
	  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
	  : _M_h(__h),
	    _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
	  { }

	// Destroy element and deallocate node (no-op if ownership was
	// released by clearing _M_node).
	~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };

	// Non-copyable: exactly one owner per node.
	_Scoped_node(const _Scoped_node&) = delete;
	_Scoped_node& operator=(const _Scoped_node&) = delete;

	__hashtable_alloc* _M_h;	// allocator used to release the node
	__node_ptr _M_node;		// owned node, or null after release
      };
324
      // Forward a stored value when assigning elements from another
      // hashtable: yields a const lvalue reference (so the value is copied)
      // when _Ht is an lvalue reference type, and an rvalue reference (so
      // the value is moved) otherwise.
      template<typename _Ht>
	static constexpr
	__conditional_t<std::is_lvalue_reference<_Ht>::value,
			const value_type&, value_type&&>
	__fwd_value_for(value_type& __val) noexcept
	{ return std::move(__val); }
331
332 // Compile-time diagnostics.
333
      // _Hash_code_base has everything protected, so use this derived type to
      // access it (used in the compile-time diagnostics below).
      struct __hash_code_base_access : __hash_code_base
      { using __hash_code_base::_M_bucket_index; };
338
339 // To get bucket index we need _RangeHash not to throw.
340 static_assert(is_nothrow_default_constructible<_RangeHash>::value,
341 "Functor used to map hash code to bucket index"
342 " must be nothrow default constructible");
343 static_assert(noexcept(
344 std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
345 "Functor used to map hash code to bucket index must be"
346 " noexcept");
347
348       // To compute bucket index we also need _ExtractKey not to throw.
349 static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
350 "_ExtractKey must be nothrow default constructible");
351 static_assert(noexcept(
352 std::declval<const _ExtractKey&>()(std::declval<_Value>())),
353 "_ExtractKey functor must be noexcept invocable");
354
355 template<typename _Keya, typename _Valuea, typename _Alloca,
356 typename _ExtractKeya, typename _Equala,
357 typename _Hasha, typename _RangeHasha, typename _Unuseda,
358 typename _RehashPolicya, typename _Traitsa,
359 bool _Unique_keysa>
360 friend struct __detail::_Map_base;
361
362 template<typename _Keya, typename _Valuea, typename _Alloca,
363 typename _ExtractKeya, typename _Equala,
364 typename _Hasha, typename _RangeHasha, typename _Unuseda,
365 typename _RehashPolicya, typename _Traitsa>
366 friend struct __detail::_Insert_base;
367
368 template<typename _Keya, typename _Valuea, typename _Alloca,
369 typename _ExtractKeya, typename _Equala,
370 typename _Hasha, typename _RangeHasha, typename _Unuseda,
371 typename _RehashPolicya, typename _Traitsa,
372 bool _Constant_iteratorsa>
373 friend struct __detail::_Insert;
374
375 template<typename _Keya, typename _Valuea, typename _Alloca,
376 typename _ExtractKeya, typename _Equala,
377 typename _Hasha, typename _RangeHasha, typename _Unuseda,
378 typename _RehashPolicya, typename _Traitsa,
379 bool _Unique_keysa>
380 friend struct __detail::_Equality;
381
382 public:
383 using size_type = typename __hashtable_base::size_type;
384 using difference_type = typename __hashtable_base::difference_type;
385
386 #if __cplusplus > 201402L
387 using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
388 using insert_return_type = _Node_insert_return<iterator, node_type>;
389 #endif
390
391 private:
392 __buckets_ptr _M_buckets = &_M_single_bucket;
393 size_type _M_bucket_count = 1;
394 __node_base _M_before_begin;
395 size_type _M_element_count = 0;
396 _RehashPolicy _M_rehash_policy;
397
398 // A single bucket used when only need for 1 bucket. Especially
399 // interesting in move semantic to leave hashtable with only 1 bucket
400 // which is not allocated so that we can have those operations noexcept
401 // qualified.
402 // Note that we can't leave hashtable with 0 bucket without adding
403 // numerous checks in the code to avoid 0 modulus.
404 __node_base_ptr _M_single_bucket = nullptr;
405
      // If the container is non-empty, make the bucket holding the first
      // element point at the _M_before_begin sentinel, restoring the
      // invariant that a bucket stores the node *before* its first node.
      void
      _M_update_bbegin()
      {
	if (auto __begin = _M_begin())
	  _M_buckets[_M_bucket_index(*__begin)] = &_M_before_begin;
      }

      // Make __n the first node of the container and fix up its bucket.
      void
      _M_update_bbegin(__node_ptr __n)
      {
	_M_before_begin._M_nxt = __n;
	_M_update_bbegin();
      }
419
      // True iff __bkts is the embedded single-bucket storage (the
      // non-allocated bucket array used when the bucket count is 1).
      bool
      _M_uses_single_bucket(__buckets_ptr __bkts) const
      { return __builtin_expect(__bkts == &_M_single_bucket, false); }

      // True iff the current bucket array is the embedded single bucket.
      bool
      _M_uses_single_bucket() const
      { return _M_uses_single_bucket(_M_buckets); }
427
      // Per-hasher size threshold from _Hashtable_hash_traits; presumably
      // below this element count lookups avoid hashing and scan directly —
      // see _Hashtable_hash_traits for the exact semantics.
      static constexpr size_t
      __small_size_threshold() noexcept
      {
	return
	  __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
      }
434
      // Upcast to the allocator base-class subobject.
      __hashtable_alloc&
      _M_base_alloc() { return *this; }
437
      // Allocate a bucket array of __bkt_count buckets. A request for a
      // single bucket is satisfied with the embedded _M_single_bucket so
      // that minimal-size tables need no allocation (which keeps some move
      // operations noexcept — see comment on _M_single_bucket).
      __buckets_ptr
      _M_allocate_buckets(size_type __bkt_count)
      {
	if (__builtin_expect(__bkt_count == 1, false))
	  {
	    _M_single_bucket = nullptr;
	    return &_M_single_bucket;
	  }

	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
      }
449
      // Deallocate a bucket array, unless it is the embedded single bucket,
      // which was never allocated.
      void
      _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
      {
	if (_M_uses_single_bucket(__bkts))
	  return;

	__hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
      }

      // Deallocate the current bucket array.
      void
      _M_deallocate_buckets()
      { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
462
      // Gets bucket begin, deals with the fact that non-empty buckets
      // contain their before-begin node: the stored pointer refers to the
      // node *preceding* the bucket's first node, so return its successor;
      // null for an empty bucket.
      __node_ptr
      _M_bucket_begin(size_type __bkt) const
      {
	__node_base_ptr __n = _M_buckets[__bkt];
	return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
      }
471
      // First node of the container (successor of the before-begin
      // sentinel), or null when the container is empty.
      __node_ptr
      _M_begin() const
      { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
475
476 // Assign *this using another _Hashtable instance. Whether elements
477 // are copied or moved depends on the _Ht reference.
478 template<typename _Ht>
479 void
480 _M_assign_elements(_Ht&&);
481
482 template<typename _Ht, typename _NodeGenerator>
483 void
484 _M_assign(_Ht&&, const _NodeGenerator&);
485
486 void
487 _M_move_assign(_Hashtable&&, true_type);
488
489 void
490 _M_move_assign(_Hashtable&&, false_type);
491
492 void
493 _M_reset() noexcept;
494
      // Construct an empty table with the given hash functor, equality
      // predicate and allocator (delegation target for other constructors).
      _Hashtable(const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a)
      : __hashtable_base(__h, __eq),
	__hashtable_alloc(__node_alloc_type(__a)),
	__enable_default_ctor(_Enable_default_constructor_tag{})
      { }
501
      // Whether move construction is nothrow: requires _No_realloc (no
      // element-wise reallocation needed, e.g. always-equal allocators)
      // and nothrow copy construction of both functors.
      template<bool _No_realloc = true>
	static constexpr bool
	_S_nothrow_move()
	{
#if __cplusplus <= 201402L
	  return __and_<__bool_constant<_No_realloc>,
			is_nothrow_copy_constructible<_Hash>,
			is_nothrow_copy_constructible<_Equal>>::value;
#else
	  // Equivalent to the __and_ above, written with if-constexpr.
	  if constexpr (_No_realloc)
	    if constexpr (is_nothrow_copy_constructible<_Hash>())
	      return is_nothrow_copy_constructible<_Equal>();
	  return false;
#endif
	}
517
518 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
519 true_type /* alloc always equal */)
520 noexcept(_S_nothrow_move());
521
522 _Hashtable(_Hashtable&&, __node_alloc_type&&,
523 false_type /* alloc always equal */);
524
525 template<typename _InputIterator>
526 _Hashtable(_InputIterator __first, _InputIterator __last,
527 size_type __bkt_count_hint,
528 const _Hash&, const _Equal&, const allocator_type&,
529 true_type __uks);
530
531 template<typename _InputIterator>
532 _Hashtable(_InputIterator __first, _InputIterator __last,
533 size_type __bkt_count_hint,
534 const _Hash&, const _Equal&, const allocator_type&,
535 false_type __uks);
536
537 public:
538 // Constructor, destructor, assignment, swap
539 _Hashtable() = default;
540
541 _Hashtable(const _Hashtable&);
542
543 _Hashtable(const _Hashtable&, const allocator_type&);
544
545 explicit
546 _Hashtable(size_type __bkt_count_hint,
547 const _Hash& __hf = _Hash(),
548 const key_equal& __eql = key_equal(),
549 const allocator_type& __a = allocator_type());
550
      // Use delegating constructors.
      // Move constructor: steals the allocator, dispatching to the
      // always-equal-allocator overload.
      _Hashtable(_Hashtable&& __ht)
      noexcept(_S_nothrow_move())
      : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
		   true_type{})
      { }
557
      // Allocator-extended move constructor: dispatches on whether the
      // rebound node allocator compares always equal, which determines
      // whether nodes can be stolen or must be moved element-wise.
      _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
      noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
      : _Hashtable(std::move(__ht), __node_alloc_type(__a),
		   typename __node_alloc_traits::is_always_equal{})
      { }
563
      // Construct an empty container using allocator __a.
      explicit
      _Hashtable(const allocator_type& __a)
      : __hashtable_alloc(__node_alloc_type(__a)),
	__enable_default_ctor(_Enable_default_constructor_tag{})
      { }
569
      // Range constructor: delegates to the unique/multi overload selected
      // by the __unique_keys trait.
      template<typename _InputIterator>
	_Hashtable(_InputIterator __f, _InputIterator __l,
		   size_type __bkt_count_hint = 0,
		   const _Hash& __hf = _Hash(),
		   const key_equal& __eql = key_equal(),
		   const allocator_type& __a = allocator_type())
	: _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
		     __unique_keys{})
	{ }
579
      // Initializer-list constructor: delegates to the range constructor
      // dispatching on the __unique_keys trait.
      _Hashtable(initializer_list<value_type> __l,
		 size_type __bkt_count_hint = 0,
		 const _Hash& __hf = _Hash(),
		 const key_equal& __eql = key_equal(),
		 const allocator_type& __a = allocator_type())
      : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
		   __hf, __eql, __a, __unique_keys{})
      { }
588
589 _Hashtable&
590 operator=(const _Hashtable& __ht);
591
      // Move assignment. Storage (nodes and bucket array) can be taken
      // over wholesale only if the allocator propagates on move assignment
      // or always compares equal; otherwise _M_move_assign falls back to
      // moving elements individually.
      _Hashtable&
      operator=(_Hashtable&& __ht)
      noexcept(__node_alloc_traits::_S_nothrow_move()
	       && is_nothrow_move_assignable<_Hash>::value
	       && is_nothrow_move_assignable<_Equal>::value)
      {
	constexpr bool __move_storage =
	  __node_alloc_traits::_S_propagate_on_move_assign()
	  || __node_alloc_traits::_S_always_equal();
	_M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
	return *this;
      }
604
      // Assign the elements of __l, recycling the existing nodes where
      // possible instead of deallocating and reallocating them.
      _Hashtable&
      operator=(initializer_list<value_type> __l)
      {
	// __roan takes ownership of the current node list for reuse.
	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
	_M_before_begin._M_nxt = nullptr;
	clear();

	// We consider that all elements of __l are going to be inserted.
	auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());

	// Do not shrink to keep potential user reservation.
	if (_M_bucket_count < __l_bkt_count)
	  rehash(__l_bkt_count);

	this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
	return *this;
      }
622
623 ~_Hashtable() noexcept;
624
625 void
626 swap(_Hashtable&)
627 noexcept(__and_<__is_nothrow_swappable<_Hash>,
628 __is_nothrow_swappable<_Equal>>::value);
629
      // Basic container operations.
      // Iterators are built directly from nodes; the past-the-end iterator
      // is represented by a null node pointer.
      iterator
      begin() noexcept
      { return iterator(_M_begin()); }

      const_iterator
      begin() const noexcept
      { return const_iterator(_M_begin()); }

      iterator
      end() noexcept
      { return iterator(nullptr); }

      const_iterator
      end() const noexcept
      { return const_iterator(nullptr); }

      const_iterator
      cbegin() const noexcept
      { return const_iterator(_M_begin()); }

      const_iterator
      cend() const noexcept
      { return const_iterator(nullptr); }
654
      // Number of elements currently stored.
      size_type
      size() const noexcept
      { return _M_element_count; }

      _GLIBCXX_NODISCARD bool
      empty() const noexcept
      { return size() == 0; }

      // Returns a copy of the (value_type-rebound) allocator.
      allocator_type
      get_allocator() const noexcept
      { return allocator_type(this->_M_node_allocator()); }

      // Maximum element count, as reported by the node allocator.
      size_type
      max_size() const noexcept
      { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
670
      // Observers
      // Returns a copy of the key-equality predicate.
      key_equal
      key_eq() const
      { return this->_M_eq(); }
675
676 // hash_function, if present, comes from _Hash_code_base.
677
      // Bucket operations
      size_type
      bucket_count() const noexcept
      { return _M_bucket_count; }

      size_type
      max_bucket_count() const noexcept
      { return max_size(); }

      // Number of elements in bucket __bkt (linear in that bucket's size).
      size_type
      bucket_size(size_type __bkt) const
      { return std::distance(begin(__bkt), end(__bkt)); }

      // Index of the bucket that key __k hashes to.
      size_type
      bucket(const key_type& __k) const
      { return _M_bucket_index(this->_M_hash_code(__k)); }
694
      // Local (per-bucket) iterators; a null node pointer is the
      // past-the-end position of every bucket.
      local_iterator
      begin(size_type __bkt)
      {
	return local_iterator(*this, _M_bucket_begin(__bkt),
			      __bkt, _M_bucket_count);
      }

      local_iterator
      end(size_type __bkt)
      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      const_local_iterator
      begin(size_type __bkt) const
      {
	return const_local_iterator(*this, _M_bucket_begin(__bkt),
				    __bkt, _M_bucket_count);
      }

      const_local_iterator
      end(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      // DR 691.
      const_local_iterator
      cbegin(size_type __bkt) const
      {
	return const_local_iterator(*this, _M_bucket_begin(__bkt),
				    __bkt, _M_bucket_count);
      }

      const_local_iterator
      cend(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
728
      // Average number of elements per bucket.
      float
      load_factor() const noexcept
      {
	return static_cast<float>(size()) / static_cast<float>(bucket_count());
      }
734
735 // max_load_factor, if present, comes from _Rehash_base.
736
      // Generalization of max_load_factor. Extension, not found in
      // TR1. Only useful if _RehashPolicy is something other than
      // the default.
      const _RehashPolicy&
      __rehash_policy() const
      { return _M_rehash_policy; }

      // Replace the rehash policy; this setter only stores the policy and
      // does not itself trigger a rehash.
      void
      __rehash_policy(const _RehashPolicy& __pol)
      { _M_rehash_policy = __pol; }
747
748 // Lookup.
749 iterator
750 find(const key_type& __k);
751
752 const_iterator
753 find(const key_type& __k) const;
754
755 size_type
756 count(const key_type& __k) const;
757
758 std::pair<iterator, iterator>
759 equal_range(const key_type& __k);
760
761 std::pair<const_iterator, const_iterator>
762 equal_range(const key_type& __k) const;
763
764 #ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
765 template<typename _Kt,
766 typename = __has_is_transparent_t<_Hash, _Kt>,
767 typename = __has_is_transparent_t<_Equal, _Kt>>
768 iterator
769 _M_find_tr(const _Kt& __k);
770
771 template<typename _Kt,
772 typename = __has_is_transparent_t<_Hash, _Kt>,
773 typename = __has_is_transparent_t<_Equal, _Kt>>
774 const_iterator
775 _M_find_tr(const _Kt& __k) const;
776
777 template<typename _Kt,
778 typename = __has_is_transparent_t<_Hash, _Kt>,
779 typename = __has_is_transparent_t<_Equal, _Kt>>
780 size_type
781 _M_count_tr(const _Kt& __k) const;
782
783 template<typename _Kt,
784 typename = __has_is_transparent_t<_Hash, _Kt>,
785 typename = __has_is_transparent_t<_Equal, _Kt>>
786 pair<iterator, iterator>
787 _M_equal_range_tr(const _Kt& __k);
788
789 template<typename _Kt,
790 typename = __has_is_transparent_t<_Hash, _Kt>,
791 typename = __has_is_transparent_t<_Equal, _Kt>>
792 pair<const_iterator, const_iterator>
793 _M_equal_range_tr(const _Kt& __k) const;
794 #endif // __glibcxx_generic_unordered_lookup
795
796 private:
      // Bucket index computation helpers: map a node (whose cached or
      // recomputed hash code _Hash_code_base uses) or a raw hash code to
      // an index in [0, _M_bucket_count).
      size_type
      _M_bucket_index(const __node_value_type& __n) const noexcept
      { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }

      size_type
      _M_bucket_index(__hash_code __c) const
      { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
805
806 __node_base_ptr
807 _M_find_before_node(const key_type&);
808
809 // Find and insert helper functions and types
810 // Find the node before the one matching the criteria.
811 __node_base_ptr
812 _M_find_before_node(size_type, const key_type&, __hash_code) const;
813
814 template<typename _Kt>
815 __node_base_ptr
816 _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
817
818 __node_ptr
819 _M_find_node(size_type __bkt, const key_type& __key,
820 __hash_code __c) const
821 {
822 __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
823 if (__before_n)
824 return static_cast<__node_ptr>(__before_n->_M_nxt);
825 return nullptr;
826 }
827
828 template<typename _Kt>
829 __node_ptr
830 _M_find_node_tr(size_type __bkt, const _Kt& __key,
831 __hash_code __c) const
832 {
833 auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
834 if (__before_n)
835 return static_cast<__node_ptr>(__before_n->_M_nxt);
836 return nullptr;
837 }
838
      // Insert a node at the beginning of a bucket. Maintains the
      // invariant that each non-empty bucket stores the node preceding
      // its first node (possibly &_M_before_begin).
      void
      _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
      {
	if (_M_buckets[__bkt])
	  {
	    // Bucket is not empty, we just need to insert the new node
	    // after the bucket before begin.
	    __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
	    _M_buckets[__bkt]->_M_nxt = __node;
	  }
	else
	  {
	    // The bucket is empty, the new node is inserted at the
	    // beginning of the singly-linked list and the bucket will
	    // contain _M_before_begin pointer.
	    __node->_M_nxt = _M_before_begin._M_nxt;
	    _M_before_begin._M_nxt = __node;

	    if (__node->_M_nxt)
	      // We must update former begin bucket that is pointing to
	      // _M_before_begin.
	      _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;

	    _M_buckets[__bkt] = &_M_before_begin;
	  }
      }
866
      // Remove the bucket first node. __next_n is the node following the
      // removed one in the global list and __next_bkt is its bucket index;
      // nothing to do unless the removal empties bucket __bkt.
      void
      _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
			     size_type __next_bkt)
      {
	if (!__next_n || __next_bkt != __bkt)
	  {
	    // Bucket is now empty
	    // First update next bucket if any
	    if (__next_n)
	      _M_buckets[__next_bkt] = _M_buckets[__bkt];

	    // Second update before begin node if necessary
	    if (&_M_before_begin == _M_buckets[__bkt])
	      _M_before_begin._M_nxt = __next_n;
	    _M_buckets[__bkt] = nullptr;
	  }
      }
885
886 // Get the node before __n in the bucket __bkt
887 __node_base_ptr
888 _M_get_previous_node(size_type __bkt, __node_ptr __n);
889
890 pair<__node_ptr, __hash_code>
891 _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const;
892
893 // Insert node __n with hash code __code, in bucket __bkt if no
894 // rehash (assumes no element with same key already present).
895 // Takes ownership of __n if insertion succeeds, throws otherwise.
896 iterator
897 _M_insert_unique_node(size_type __bkt, __hash_code,
898 __node_ptr __n, size_type __n_elt = 1);
899
900 // Insert node __n with key __k and hash code __code.
901 // Takes ownership of __n if insertion succeeds, throws otherwise.
902 iterator
903 _M_insert_multi_node(__node_ptr __hint,
904 __hash_code __code, __node_ptr __n);
905
      // Emplace for unique keys; returns whether insertion took place.
      template<typename... _Args>
	std::pair<iterator, bool>
	_M_emplace(true_type __uks, _Args&&... __args);

      // Emplace for equivalent keys: forwards to the hinted overload with
      // cend() as hint.
      template<typename... _Args>
	iterator
	_M_emplace(false_type __uks, _Args&&... __args)
	{ return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }

      // Emplace with hint, useless when keys are unique.
      template<typename... _Args>
	iterator
	_M_emplace(const_iterator, true_type __uks, _Args&&... __args)
	{ return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }

      template<typename... _Args>
	iterator
	_M_emplace(const_iterator, false_type __uks, _Args&&... __args);
924
925 template<typename _Kt, typename _Arg, typename _NodeGenerator>
926 std::pair<iterator, bool>
927 _M_insert_unique(_Kt&&, _Arg&&, const _NodeGenerator&);
928
929 template<typename _Kt>
930 static __conditional_t<
931 __and_<__is_nothrow_invocable<_Hash&, const key_type&>,
932 __not_<__is_nothrow_invocable<_Hash&, _Kt>>>::value,
933 key_type, _Kt&&>
934 _S_forward_key(_Kt&& __k)
935 { return std::forward<_Kt>(__k); }
936
937 static const key_type&
938 _S_forward_key(const key_type& __k)
939 { return __k; }
940
941 static key_type&&
942 _S_forward_key(key_type&& __k)
943 { return std::move(__k); }
944
945 template<typename _Arg, typename _NodeGenerator>
946 std::pair<iterator, bool>
947 _M_insert_unique_aux(_Arg&& __arg, const _NodeGenerator& __node_gen)
948 {
949 return _M_insert_unique(
950 _S_forward_key(_ExtractKey{}(std::forward<_Arg>(__arg))),
951 std::forward<_Arg>(__arg), __node_gen);
952 }
953
954 template<typename _Arg, typename _NodeGenerator>
955 std::pair<iterator, bool>
956 _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
957 true_type /* __uks */)
958 {
959 using __to_value
960 = __detail::_ConvertToValueType<_ExtractKey, value_type>;
961 return _M_insert_unique_aux(
962 __to_value{}(std::forward<_Arg>(__arg)), __node_gen);
963 }
964
965 template<typename _Arg, typename _NodeGenerator>
966 iterator
967 _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
968 false_type __uks)
969 {
970 using __to_value
971 = __detail::_ConvertToValueType<_ExtractKey, value_type>;
972 return _M_insert(cend(),
973 __to_value{}(std::forward<_Arg>(__arg)), __node_gen, __uks);
974 }
975
976 // Insert with hint, not used when keys are unique.
977 template<typename _Arg, typename _NodeGenerator>
978 iterator
979 _M_insert(const_iterator, _Arg&& __arg,
980 const _NodeGenerator& __node_gen, true_type __uks)
981 {
982 return
983 _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
984 }
985
986 // Insert with hint when keys are not unique.
987 template<typename _Arg, typename _NodeGenerator>
988 iterator
989 _M_insert(const_iterator, _Arg&&,
990 const _NodeGenerator&, false_type __uks);
991
992 size_type
993 _M_erase(true_type __uks, const key_type&);
994
995 size_type
996 _M_erase(false_type __uks, const key_type&);
997
998 iterator
999 _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
1000
    public:
      // Emplace
      // Dispatches on __unique_keys: pair<iterator,bool> for unique-key
      // containers, plain iterator otherwise (see __ireturn_type).
      template<typename... _Args>
	__ireturn_type
	emplace(_Args&&... __args)
	{ return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }

      template<typename... _Args>
	iterator
	emplace_hint(const_iterator __hint, _Args&&... __args)
	{
	  return _M_emplace(__hint, __unique_keys{},
			    std::forward<_Args>(__args)...);
	}

      // Insert member functions via inheritance.

      // Erase
      iterator
      erase(const_iterator);

      // LWG 2059.
      // Overload for plain iterator so erase(it) is not ambiguous with
      // erase(const key_type&) when key_type converts from iterator.
      iterator
      erase(iterator __it)
      { return erase(const_iterator(__it)); }

      size_type
      erase(const key_type& __k)
      { return _M_erase(__unique_keys{}, __k); }

      iterator
      erase(const_iterator, const_iterator);

      void
      clear() noexcept;

      // Set number of buckets keeping it appropriate for container's number
      // of elements.
      void rehash(size_type __bkt_count);
1040
      // DR 1189.
      // reserve, if present, comes from _Rehash_base.

#if __cplusplus > 201402L
      /// Re-insert an extracted node into a container with unique keys.
      /// If an equivalent key already exists the node handle is returned
      /// to the caller in insert_return_type::node; otherwise ownership
      /// of the node is taken and inserted == true.
      insert_return_type
      _M_reinsert_node(node_type&& __nh)
      {
	insert_return_type __ret;
	if (__nh.empty())
	  __ret.position = end();
	else
	  {
	    __glibcxx_assert(get_allocator() == __nh.get_allocator());

	    __node_ptr __n = nullptr;
	    const key_type& __k = __nh._M_key();
	    const size_type __size = size();
	    if (__size <= __small_size_threshold())
	      {
		// Small container: linear scan avoids hashing entirely.
		for (__n = _M_begin(); __n; __n = __n->_M_next())
		  if (this->_M_key_equals(__k, *__n))
		    break;
	      }

	    // __code/__bkt are only computed (and only valid) when no
	    // duplicate was found by the linear scan above; the insertion
	    // branch below only runs in that case.
	    __hash_code __code;
	    size_type __bkt;
	    if (!__n)
	      {
		__code = this->_M_hash_code(__k);
		__bkt = _M_bucket_index(__code);
		if (__size > __small_size_threshold())
		  __n = _M_find_node(__bkt, __k, __code);
	      }

	    if (__n)
	      {
		// Duplicate key: hand the node back to the caller.
		__ret.node = std::move(__nh);
		__ret.position = iterator(__n);
		__ret.inserted = false;
	      }
	    else
	      {
		__ret.position
		  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
		// Ownership transferred; empty the handle.
		__nh._M_ptr = nullptr;
		__ret.inserted = true;
	      }
	  }
	return __ret;
      }
1092
      /// Re-insert an extracted node into a container with equivalent keys.
      /// Always succeeds (multi containers accept duplicates); returns an
      /// iterator to the re-inserted element.
      iterator
      _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
      {
	if (__nh.empty())
	  return end();

	__glibcxx_assert(get_allocator() == __nh.get_allocator());

	const key_type& __k = __nh._M_key();
	auto __code = this->_M_hash_code(__k);
	auto __ret
	  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
	// Ownership transferred to the container; empty the handle.
	__nh._M_ptr = nullptr;
	return __ret;
      }
1109
    private:
      // Unlink the node after __prev_n (in bucket __bkt) from the singly
      // linked list of elements and return it wrapped in a node handle.
      // Fixes up the bucket pointers that referenced the unlinked node.
      node_type
      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
      {
	__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
	if (__prev_n == _M_buckets[__bkt])
	  // __n is the first node of its bucket: may empty the bucket and/or
	  // retarget the next bucket's before-begin pointer.
	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
	     __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
	else if (__n->_M_nxt)
	  {
	    // __n is last in its bucket: the following bucket (if different)
	    // pointed at __n as its before-begin node; repoint it.
	    size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	    if (__next_bkt != __bkt)
	      _M_buckets[__next_bkt] = __prev_n;
	  }

	__prev_n->_M_nxt = __n->_M_nxt;
	__n->_M_nxt = nullptr;
	--_M_element_count;
	return { __n, this->_M_node_allocator() };
      }
1130
      // Only use the possibly cached node's hash code if its hash function
      // _H2 matches _Hash and is stateless. Otherwise recompute it using _Hash.
      // Note: the two nested `if constexpr`s deliberately have no braces so
      // the trailing return is the fallback for both failing conditions.
      template<typename _H2>
	__hash_code
	_M_src_hash_code(const _H2&, const key_type& __k,
			 const __node_value_type& __src_n) const
	{
	  if constexpr (std::is_same_v<_H2, _Hash>)
	    if constexpr (std::is_empty_v<_Hash>)
	      // Same, stateless hasher: reuse the source node's cached code.
	      return this->_M_hash_code(__src_n);

	  return this->_M_hash_code(__k);
	}
1144
    public:
      // Extract a node.
      // The element at __pos is unlinked (not destroyed) and returned in a
      // node handle that owns it via the container's allocator.
      node_type
      extract(const_iterator __pos)
      {
	size_t __bkt = _M_bucket_index(*__pos._M_cur);
	return _M_extract_node(__bkt,
			       _M_get_previous_node(__bkt, __pos._M_cur));
      }

      /// Extract a node.
      /// Returns an empty node handle if no element matches __k.
      node_type
      extract(const _Key& __k)
      {
	node_type __nh;
	__hash_code __code = this->_M_hash_code(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
	  __nh = _M_extract_node(__bkt, __prev_node);
	return __nh;
      }
1166
      /// Merge from a compatible container into one with unique keys.
      /// Elements of __src whose key is not already present are moved here
      /// (nodes are spliced, not copied); duplicates stay in __src.
      template<typename _Compatible_Hashtable>
	void
	_M_merge_unique(_Compatible_Hashtable& __src)
	{
	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
			node_type>, "Node types are compatible");
	  __glibcxx_assert(get_allocator() == __src.get_allocator());

	  // __n_elt tracks how many source elements might still be inserted;
	  // it is passed to _M_insert_unique_node so the rehash policy can
	  // grow once for the whole batch instead of per element.
	  auto __n_elt = __src.size();
	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
	    {
	      // Advance before any extraction invalidates __pos.
	      auto __pos = __i++;
	      const size_type __size = size();
	      const key_type& __k = _ExtractKey{}(*__pos);
	      if (__size <= __small_size_threshold())
		{
		  // Small container: duplicate check by linear scan.
		  bool __found = false;
		  for (auto __n = _M_begin(); __n; __n = __n->_M_next())
		    if (this->_M_key_equals(__k, *__n))
		      {
			__found = true;
			break;
		      }

		  if (__found)
		    {
		      if (__n_elt != 1)
			--__n_elt;
		      continue;
		    }
		}

	      __hash_code __code
		= _M_src_hash_code(__src.hash_function(), __k, *__pos._M_cur);
	      size_type __bkt = _M_bucket_index(__code);
	      if (__size <= __small_size_threshold()
		  || _M_find_node(__bkt, __k, __code) == nullptr)
		{
		  auto __nh = __src.extract(__pos);
		  _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
		  // Node now owned by *this; empty the handle.
		  __nh._M_ptr = nullptr;
		  // Batch hint consumed by the successful insertion.
		  __n_elt = 1;
		}
	      else if (__n_elt != 1)
		--__n_elt;
	    }
	}
1215
      /// Merge from a compatible container into one with equivalent keys.
      /// All elements of __src are spliced into *this; __src ends up empty.
      template<typename _Compatible_Hashtable>
	void
	_M_merge_multi(_Compatible_Hashtable& __src)
	{
	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
			node_type>, "Node types are compatible");
	  __glibcxx_assert(get_allocator() == __src.get_allocator());

	  __node_ptr __hint = nullptr;
	  // Reserve up front: every source element will be inserted.
	  this->reserve(size() + __src.size());
	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
	    {
	      // Advance before extraction invalidates __pos.
	      auto __pos = __i++;
	      const key_type& __k = _ExtractKey{}(*__pos);
	      __hash_code __code
		= _M_src_hash_code(__src.hash_function(), __k, *__pos._M_cur);
	      auto __nh = __src.extract(__pos);
	      // Use the previous insertion point as hint to keep equivalent
	      // keys grouped cheaply.
	      __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
	      __nh._M_ptr = nullptr;
	    }
	}
#endif // C++17
1239
    private:
      // Helper rehash method used when keys are unique.
      void _M_rehash(size_type __bkt_count, true_type __uks);

      // Helper rehash method used when keys can be non-unique.
      // (Non-unique rehash must keep equivalent keys adjacent.)
      void _M_rehash(size_type __bkt_count, false_type __uks);
    };
1247
  // Definitions of class template _Hashtable's out-of-line member functions.

  // Construct with a bucket-count hint: delegate to the functor/allocator
  // constructor, then grow the bucket array if the policy-adjusted hint
  // exceeds the default bucket count.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(size_type __bkt_count_hint,
	       const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
    : _Hashtable(__h, __eq, __a)
    {
      auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
      if (__bkt_count > _M_bucket_count)
	{
	  _M_buckets = _M_allocate_buckets(__bkt_count);
	  _M_bucket_count = __bkt_count;
	}
    }
1266
  // Range constructor for unique keys: size the table from the hint only,
  // then let insert() handle growth (duplicates make pre-sizing from the
  // range length pointless).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, true_type /* __uks */)
      : _Hashtable(__bkt_count_hint, __h, __eq, __a)
      { this->insert(__f, __l); }
1280
  // Range constructor for non-unique keys: every element will be inserted,
  // so pre-size buckets from max(range length, hint) to avoid rehashing
  // during the insertion loop.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, false_type __uks)
      : _Hashtable(__h, __eq, __a)
      {
	// __distance_fw returns 0 for pure input iterators.
	auto __nb_elems = __detail::__distance_fw(__f, __l);
	auto __bkt_count =
	  _M_rehash_policy._M_next_bkt(
	    std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
		     __bkt_count_hint));

	if (__bkt_count > _M_bucket_count)
	  {
	    _M_buckets = _M_allocate_buckets(__bkt_count);
	    _M_bucket_count = __bkt_count;
	  }

	__alloc_node_gen_t __node_gen(*this);
	for (; __f != __l; ++__f)
	  _M_insert(*__f, __node_gen, __uks);
      }
1310
  // Copy assignment. Handles propagate_on_container_copy_assignment:
  // when the allocator propagates and differs from the source's, all
  // storage must be released with the old allocator before adopting the
  // new one; otherwise existing buckets/nodes are reused.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    operator=(const _Hashtable& __ht)
    -> _Hashtable&
    {
      if (&__ht == this)
	return *this;

      if (__node_alloc_traits::_S_propagate_on_copy_assign())
	{
	  auto& __this_alloc = this->_M_node_allocator();
	  auto& __that_alloc = __ht._M_node_allocator();
	  if (!__node_alloc_traits::_S_always_equal()
	      && __this_alloc != __that_alloc)
	    {
	      // Replacement allocator cannot free existing storage.
	      this->_M_deallocate_nodes(_M_begin());
	      _M_before_begin._M_nxt = nullptr;
	      _M_deallocate_buckets();
	      _M_buckets = nullptr;
	      std::__alloc_on_copy(__this_alloc, __that_alloc);
	      __hashtable_base::operator=(__ht);
	      _M_bucket_count = __ht._M_bucket_count;
	      _M_element_count = __ht._M_element_count;
	      _M_rehash_policy = __ht._M_rehash_policy;
	      __alloc_node_gen_t __alloc_node_gen(*this);
	      __try
		{
		  _M_assign(__ht, __alloc_node_gen);
		}
	      __catch(...)
		{
		  // _M_assign took care of deallocating all memory. Now we
		  // must make sure this instance remains in a usable state.
		  _M_reset();
		  __throw_exception_again;
		}
	      return *this;
	    }
	  // Allocators compare equal: propagate and fall through to reuse
	  // the existing storage.
	  std::__alloc_on_copy(__this_alloc, __that_alloc);
	}

      // Reuse allocated buckets and nodes.
      _M_assign_elements(__ht);
      return *this;
    }
1362
  // Replace this container's elements with copies (or moves, for an rvalue
  // _Ht) of __ht's, reusing existing nodes where possible.  Provides the
  // strong-ish guarantee: on failure the previous bucket array is restored
  // (emptied) and the rehash policy rolled back by __rehash_guard.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign_elements(_Ht&& __ht)
      {
	__buckets_ptr __former_buckets = nullptr;
	std::size_t __former_bucket_count = _M_bucket_count;
	__rehash_guard_t __rehash_guard(_M_rehash_policy);

	if (_M_bucket_count != __ht._M_bucket_count)
	  {
	    // Need a bucket array of the source's size; keep the old one
	    // around so it can be restored if copying throws.
	    __former_buckets = _M_buckets;
	    _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
	    _M_bucket_count = __ht._M_bucket_count;
	  }
	else
	  __builtin_memset(_M_buckets, 0,
			   _M_bucket_count * sizeof(__node_base_ptr));

	__try
	  {
	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
	    _M_element_count = __ht._M_element_count;
	    _M_rehash_policy = __ht._M_rehash_policy;
	    // Node generator that recycles our existing nodes before
	    // allocating new ones.
	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
	    _M_before_begin._M_nxt = nullptr;
	    _M_assign(std::forward<_Ht>(__ht), __roan);
	    if (__former_buckets)
	      _M_deallocate_buckets(__former_buckets, __former_bucket_count);
	    // Success: dismiss the rehash-policy rollback.
	    __rehash_guard._M_guarded_obj = nullptr;
	  }
	__catch(...)
	  {
	    if (__former_buckets)
	      {
		// Restore previous buckets.
		_M_deallocate_buckets();
		_M_buckets = __former_buckets;
		_M_bucket_count = __former_bucket_count;
	      }
	    __builtin_memset(_M_buckets, 0,
			     _M_bucket_count * sizeof(__node_base_ptr));
	    __throw_exception_again;
	  }
      }
1413
  // Populate an empty element list from __ht, producing each node via
  // __node_gen (copying or moving depending on _Ht's value category).
  // Assumes *this currently holds no elements; cleans up on failure.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht, typename _NodeGenerator>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
      {
	// Remember if we allocated the bucket array here, so it can be
	// released again if node construction throws.
	__buckets_ptr __buckets = nullptr;
	if (!_M_buckets)
	  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);

	__try
	  {
	    if (!__ht._M_before_begin._M_nxt)
	      return;

	    // First deal with the special first node pointed to by
	    // _M_before_begin.
	    __node_ptr __ht_n = __ht._M_begin();
	    __node_ptr __this_n
	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
	    this->_M_copy_code(*__this_n, *__ht_n);
	    _M_update_bbegin(__this_n);

	    // Then deal with other nodes.  Source and destination have the
	    // same bucket count, so elements keep their bucket and order:
	    // a bucket's before-begin pointer is simply the previous node.
	    __node_ptr __prev_n = __this_n;
	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
	      {
		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
		__prev_n->_M_nxt = __this_n;
		this->_M_copy_code(*__this_n, *__ht_n);
		size_type __bkt = _M_bucket_index(*__this_n);
		if (!_M_buckets[__bkt])
		  _M_buckets[__bkt] = __prev_n;
		__prev_n = __this_n;
	      }
	  }
	__catch(...)
	  {
	    clear();
	    if (__buckets)
	      _M_deallocate_buckets();
	    __throw_exception_again;
	  }
      }
1462
  // Return to the freshly-constructed empty state: single inline bucket,
  // no elements, default rehash policy.  Does NOT free nodes or buckets —
  // callers must have transferred or released them already (e.g. after a
  // move or a failed assignment).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_reset() noexcept
    {
      _M_rehash_policy._M_reset();
      _M_bucket_count = 1;
      _M_single_bucket = nullptr;
      _M_buckets = &_M_single_bucket;
      _M_before_begin._M_nxt = nullptr;
      _M_element_count = 0;
    }
1479
  // Move assignment when the allocator can be propagated/is equal: steal
  // the source's buckets and nodes wholesale, then reset the source.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, true_type)
    {
      if (__builtin_expect(std::__addressof(__ht) == this, false))
	return;

      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
      __hashtable_base::operator=(std::move(__ht));
      _M_rehash_policy = __ht._M_rehash_policy;
      if (!__ht._M_uses_single_bucket())
	_M_buckets = __ht._M_buckets;
      else
	{
	  // The source's inline single bucket cannot be stolen by pointer;
	  // switch to our own and copy its contents.
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      _M_bucket_count = __ht._M_bucket_count;
      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
      _M_element_count = __ht._M_element_count;
      std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();
      __ht._M_reset();
    }
1513
  // Move assignment when the allocator does not propagate and may be
  // unequal: steal storage only if the allocators actually compare equal,
  // otherwise move element-by-element.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, false_type)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	_M_move_assign(std::move(__ht), true_type{});
      else
	{
	  // Can't move memory, move elements then.
	  _M_assign_elements(std::move(__ht));
	  __ht.clear();
	}
    }
1532
  // Copy constructor: allocator obtained via select_on_container_copy_
  // construction, then elements deep-copied by _M_assign.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(
	__node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
      __enable_default_ctor(__ht),
      _M_buckets(nullptr),	// _M_assign allocates the bucket array.
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }
1554
  // Move constructor with allocator, allocators always equal: storage can
  // be stolen unconditionally, so this is noexcept when the functors'
  // moves are.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       true_type /* alloc always equal */)
    noexcept(_S_nothrow_move())
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(__ht._M_buckets),
      _M_bucket_count(__ht._M_bucket_count),
      _M_before_begin(__ht._M_before_begin._M_nxt),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Update buckets if __ht is using its single bucket.
      if (__ht._M_uses_single_bucket())
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();

      __ht._M_reset();
    }
1587
  // Extended copy constructor: copy elements using the supplied allocator.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(__node_alloc_type(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(),		// _M_assign allocates the bucket array.
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }
1608
  // Extended move constructor when allocators are not always equal: steal
  // storage only if the allocators compare equal at run time, otherwise
  // move (or copy, if moving can throw) the elements individually.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       false_type /* alloc always equal */)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	{
	  if (__ht._M_uses_single_bucket())
	    {
	      _M_buckets = &_M_single_bucket;
	      _M_single_bucket = __ht._M_single_bucket;
	    }
	  else
	    _M_buckets = __ht._M_buckets;

	  // Fix bucket containing the _M_before_begin pointer that can't be
	  // moved.
	  _M_update_bbegin(__ht._M_begin());

	  __ht._M_reset();
	}
      else
	{
	  __alloc_node_gen_t __alloc_gen(*this);

	  // Copy instead of move when the element move constructor can
	  // throw, to preserve the source on failure.
	  using _Fwd_Ht = __conditional_t<
	    __move_if_noexcept_cond<value_type>::value,
	    const _Hashtable&, _Hashtable&&>;
	  _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
	  __ht.clear();
	}
    }
1654
  // Destructor: destroy all elements and release the bucket array.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    ~_Hashtable() noexcept
    {
      // Getting a bucket index from a node shall not throw because it is used
      // in methods (erase, swap...) that shall not throw. Need a complete
      // type to check this, so do it in the destructor not at class scope.
      static_assert(noexcept(declval<const __hash_code_base_access&>()
			     ._M_bucket_index(declval<const __node_value_type&>(),
					      (std::size_t)0)),
		    "Cache the hash code or qualify your functors involved"
		    " in hash code and bucket index computation with noexcept");

      clear();
      _M_deallocate_buckets();
    }
1675
  // Swap all state with __x, handling the two non-swappable parts
  // specially: the inline single bucket (address differs per object) and
  // the _M_before_begin node referenced from the bucket array.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    swap(_Hashtable& __x)
    noexcept(__and_<__is_nothrow_swappable<_Hash>,
		    __is_nothrow_swappable<_Equal>>::value)
    {
      // The only base class with member variables is hash_code_base.
      // We define _Hash_code_base::_M_swap because different
      // specializations have different members.
      this->_M_swap(__x);

      std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
      std::swap(_M_rehash_policy, __x._M_rehash_policy);

      // Deal properly with potentially moved instances.
      if (this->_M_uses_single_bucket())
	{
	  if (!__x._M_uses_single_bucket())
	    {
	      _M_buckets = __x._M_buckets;
	      __x._M_buckets = &__x._M_single_bucket;
	    }
	  // Both single-bucket: nothing to do, contents swapped below.
	}
      else if (__x._M_uses_single_bucket())
	{
	  __x._M_buckets = _M_buckets;
	  _M_buckets = &_M_single_bucket;
	}
      else
	std::swap(_M_buckets, __x._M_buckets);

      std::swap(_M_bucket_count, __x._M_bucket_count);
      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
      std::swap(_M_element_count, __x._M_element_count);
      std::swap(_M_single_bucket, __x._M_single_bucket);

      // Fix buckets containing the _M_before_begin pointers that can't be
      // swapped.
      _M_update_bbegin();
      __x._M_update_bbegin();
    }
1722
1723 template<typename _Key, typename _Value, typename _Alloc,
1724 typename _ExtractKey, typename _Equal,
1725 typename _Hash, typename _RangeHash, typename _Unused,
1726 typename _RehashPolicy, typename _Traits>
1727 auto
1728 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1729 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1730 find(const key_type& __k)
1731 -> iterator
1732 {
1733 if (size() <= __small_size_threshold())
1734 {
1735 for (auto __it = _M_begin(); __it; __it = __it->_M_next())
1736 if (this->_M_key_equals(__k, *__it))
1737 return iterator(__it);
1738 return end();
1739 }
1740
1741 __hash_code __code = this->_M_hash_code(__k);
1742 std::size_t __bkt = _M_bucket_index(__code);
1743 return iterator(_M_find_node(__bkt, __k, __code));
1744 }
1745
1746 template<typename _Key, typename _Value, typename _Alloc,
1747 typename _ExtractKey, typename _Equal,
1748 typename _Hash, typename _RangeHash, typename _Unused,
1749 typename _RehashPolicy, typename _Traits>
1750 auto
1751 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1752 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1753 find(const key_type& __k) const
1754 -> const_iterator
1755 {
1756 if (size() <= __small_size_threshold())
1757 {
1758 for (auto __it = _M_begin(); __it; __it = __it->_M_next())
1759 if (this->_M_key_equals(__k, *__it))
1760 return const_iterator(__it);
1761 return end();
1762 }
1763
1764 __hash_code __code = this->_M_hash_code(__k);
1765 std::size_t __bkt = _M_bucket_index(__code);
1766 return const_iterator(_M_find_node(__bkt, __k, __code));
1767 }
1768
#if __cplusplus > 201703L
  // Heterogeneous (transparent) lookup: find by a key-like _Kt without
  // constructing a key_type.  Same small-size/hash split as find().
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_tr(const _Kt& __k)
      -> iterator
      {
	if (size() <= __small_size_threshold())
	  {
	    // Linear scan, no hashing.
	    for (auto __n = _M_begin(); __n; __n = __n->_M_next())
	      if (this->_M_key_equals_tr(__k, *__n))
		return iterator(__n);
	    return end();
	  }

	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	return iterator(_M_find_node_tr(__bkt, __k, __code));
      }

  // Const overload of the heterogeneous lookup above.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_tr(const _Kt& __k) const
      -> const_iterator
      {
	if (size() <= __small_size_threshold())
	  {
	    // Linear scan, no hashing.
	    for (auto __n = _M_begin(); __n; __n = __n->_M_next())
	      if (this->_M_key_equals_tr(__k, *__n))
		return const_iterator(__n);
	    return end();
	  }

	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	return const_iterator(_M_find_node_tr(__bkt, __k, __code));
      }
#endif
1818
  // Count elements with key equivalent to __k.  For unique keys this is
  // 0 or 1; for multi containers equivalent elements are adjacent, so we
  // count forward from the first match until the key changes.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    count(const key_type& __k) const
    -> size_type
    {
      auto __it = find(__k);
      if (!__it._M_cur)
	return 0;

      if (__unique_keys::value)
	return 1;

      size_type __result = 1;
      for (auto __ref = __it++;
	   __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
	   ++__it)
	++__result;

      return __result;
    }
1844
#if __cplusplus > 201703L
  // Heterogeneous count: like count() but comparing via a key-like _Kt.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_count_tr(const _Kt& __k) const
      -> size_type
      {
	if (size() <= __small_size_threshold())
	  {
	    size_type __result = 0;
	    for (auto __n = _M_begin(); __n; __n = __n->_M_next())
	      {
		if (this->_M_key_equals_tr(__k, *__n))
		  {
		    ++__result;
		    continue;
		  }

		// Equivalent elements are adjacent: once a run of matches
		// ends, no further match can occur.
		if (__result)
		  break;
	      }

	    return __result;
	  }

	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	if (!__n)
	  return 0;

	// Count the contiguous run of equivalent elements from the first.
	iterator __it(__n);
	size_type __result = 1;
	for (++__it;
	     __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
	     ++__it)
	  ++__result;

	return __result;
      }
#endif
1891
// Return the range of elements with key equivalent to __k as a
// [first, last) iterator pair.  Equivalent elements are adjacent on the
// singly-linked element list, so the range is found by walking forward
// from the first match; for unique-keys containers it has at most one
// element and no walk is needed.
1892 template<typename _Key, typename _Value, typename _Alloc,
1893 typename _ExtractKey, typename _Equal,
1894 typename _Hash, typename _RangeHash, typename _Unused,
1895 typename _RehashPolicy, typename _Traits>
1896 auto
1897 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1898 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1899 equal_range(const key_type& __k)
1900 -> pair<iterator, iterator>
1901 {
1902 auto __ite = find(__k);
1903 if (!__ite._M_cur)
1904 return { __ite, __ite };
1905
1906 auto __beg = __ite++;
1907 if (__unique_keys::value)
1908 return { __beg, __ite };
1909
// Multi-keys: extend the range while successive nodes compare equal to
// the first matching node.
1910 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1911 ++__ite;
1912
1913 return { __beg, __ite };
1914 }
1915
// Const overload of equal_range; identical logic to the non-const
// version but returning const_iterators.
1916 template<typename _Key, typename _Value, typename _Alloc,
1917 typename _ExtractKey, typename _Equal,
1918 typename _Hash, typename _RangeHash, typename _Unused,
1919 typename _RehashPolicy, typename _Traits>
1920 auto
1921 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1922 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1923 equal_range(const key_type& __k) const
1924 -> pair<const_iterator, const_iterator>
1925 {
1926 auto __ite = find(__k);
1927 if (!__ite._M_cur)
1928 return { __ite, __ite };
1929
1930 auto __beg = __ite++;
1931 if (__unique_keys::value)
1932 return { __beg, __ite };
1933
// Multi-keys: equivalent elements are adjacent, walk to the end of the run.
1934 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1935 ++__ite;
1936
1937 return { __beg, __ite };
1938 }
1939
1940 #if __cplusplus > 201703L
// Heterogeneous equal_range (C++20 transparent lookup): find the range of
// elements equivalent to __k without constructing a key_type from _Kt.
1941 template<typename _Key, typename _Value, typename _Alloc,
1942 typename _ExtractKey, typename _Equal,
1943 typename _Hash, typename _RangeHash, typename _Unused,
1944 typename _RehashPolicy, typename _Traits>
1945 template<typename _Kt, typename, typename>
1946 auto
1947 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1948 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1949 _M_equal_range_tr(const _Kt& __k)
1950 -> pair<iterator, iterator>
1951 {
// Small containers: linear scan.  __beg marks the first match; the scan
// stops at the first non-matching node after the run of matches, which
// becomes the past-the-end of the range (or null if none/exhausted).
1952 if (size() <= __small_size_threshold())
1953 {
1954 __node_ptr __n, __beg = nullptr;
1955 for (__n = _M_begin(); __n; __n = __n->_M_next())
1956 {
1957 if (this->_M_key_equals_tr(__k, *__n))
1958 {
1959 if (!__beg)
1960 __beg = __n;
1961 continue;
1962 }
1963
1964 if (__beg)
1965 break;
1966 }
1967
1968 return { iterator(__beg), iterator(__n) };
1969 }
1970
// Normal path: hash, find the first match in its bucket, then extend the
// range over the adjacent run of equivalent nodes.
1971 __hash_code __code = this->_M_hash_code_tr(__k);
1972 std::size_t __bkt = _M_bucket_index(__code);
1973 auto __n = _M_find_node_tr(__bkt, __k, __code);
1974 iterator __ite(__n);
1975 if (!__n)
1976 return { __ite, __ite };
1977
1978 auto __beg = __ite++;
1979 while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1980 ++__ite;
1981
1982 return { __beg, __ite };
1983 }
1984
// Const overload of the heterogeneous equal_range; same logic as the
// non-const version but returning const_iterators.
1985 template<typename _Key, typename _Value, typename _Alloc,
1986 typename _ExtractKey, typename _Equal,
1987 typename _Hash, typename _RangeHash, typename _Unused,
1988 typename _RehashPolicy, typename _Traits>
1989 template<typename _Kt, typename, typename>
1990 auto
1991 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1992 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1993 _M_equal_range_tr(const _Kt& __k) const
1994 -> pair<const_iterator, const_iterator>
1995 {
// Small containers: linear scan; __beg is the first match, __n ends up
// just past the run of equivalent nodes (or null).
1996 if (size() <= __small_size_threshold())
1997 {
1998 __node_ptr __n, __beg = nullptr;
1999 for (__n = _M_begin(); __n; __n = __n->_M_next())
2000 {
2001 if (this->_M_key_equals_tr(__k, *__n))
2002 {
2003 if (!__beg)
2004 __beg = __n;
2005 continue;
2006 }
2007
2008 if (__beg)
2009 break;
2010 }
2011
2012 return { const_iterator(__beg), const_iterator(__n) };
2013 }
2014
// Normal path: hash, locate first match, extend over the adjacent run.
2015 __hash_code __code = this->_M_hash_code_tr(__k);
2016 std::size_t __bkt = _M_bucket_index(__code);
2017 auto __n = _M_find_node_tr(__bkt, __k, __code);
2018 const_iterator __ite(__n);
2019 if (!__n)
2020 return { __ite, __ite };
2021
2022 auto __beg = __ite++;
2023 while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
2024 ++__ite;
2025
2026 return { __beg, __ite };
2027 }
2028 #endif
2029
2030 // Find the node before the one whose key compares equal to k.
2031 // Return nullptr if no node is found.
// Scans the whole element list starting from the _M_before_begin
// sentinel, comparing keys directly (no hash codes) — used on the
// small-size fast path.  Returning the *predecessor* lets the caller
// unlink the matching node from the singly-linked list.
2032 template<typename _Key, typename _Value, typename _Alloc,
2033 typename _ExtractKey, typename _Equal,
2034 typename _Hash, typename _RangeHash, typename _Unused,
2035 typename _RehashPolicy, typename _Traits>
2036 auto
2037 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2038 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2039 _M_find_before_node(const key_type& __k)
2040 -> __node_base_ptr
2041 {
2042 __node_base_ptr __prev_p = &_M_before_begin;
2043 if (!__prev_p->_M_nxt)
2044 return nullptr;
2045
2046 for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);
2047 __p != nullptr;
2048 __p = __p->_M_next())
2049 {
2050 if (this->_M_key_equals(__k, *__p))
2051 return __prev_p;
2052
2053 __prev_p = __p;
2054 }
2055
2056 return nullptr;
2057 }
2058
2059 // Find the node before the one whose key compares equal to k in the bucket
2060 // bkt. Return nullptr if no node is found.
// _M_buckets[__bkt] points at the node *before* the bucket's first
// element (possibly &_M_before_begin), so the scan starts at its
// successor and stops when leaving the bucket.
2061 template<typename _Key, typename _Value, typename _Alloc,
2062 typename _ExtractKey, typename _Equal,
2063 typename _Hash, typename _RangeHash, typename _Unused,
2064 typename _RehashPolicy, typename _Traits>
2065 auto
2066 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2067 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2068 _M_find_before_node(size_type __bkt, const key_type& __k,
2069 __hash_code __code) const
2070 -> __node_base_ptr
2071 {
2072 __node_base_ptr __prev_p = _M_buckets[__bkt];
2073 if (!__prev_p)
2074 return nullptr;
2075
2076 for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
2077 __p = __p->_M_next())
2078 {
2079 if (this->_M_equals(__k, __code, *__p))
2080 return __prev_p;
2081
// Stop when the list ends or the next node belongs to another bucket.
2082 if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
2083 break;
2084 __prev_p = __p;
2085 }
2086
2087 return nullptr;
2088 }
2089
// Heterogeneous variant of the in-bucket _M_find_before_node: same
// bucket-bounded scan, but comparing against a transparent key type _Kt
// via _M_equals_tr instead of key_type.
2090 template<typename _Key, typename _Value, typename _Alloc,
2091 typename _ExtractKey, typename _Equal,
2092 typename _Hash, typename _RangeHash, typename _Unused,
2093 typename _RehashPolicy, typename _Traits>
2094 template<typename _Kt>
2095 auto
2096 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2097 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2098 _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
2099 __hash_code __code) const
2100 -> __node_base_ptr
2101 {
2102 __node_base_ptr __prev_p = _M_buckets[__bkt];
2103 if (!__prev_p)
2104 return nullptr;
2105
2106 for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
2107 __p = __p->_M_next())
2108 {
2109 if (this->_M_equals_tr(__k, __code, *__p))
2110 return __prev_p;
2111
// Stop when the list ends or the next node belongs to another bucket.
2112 if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
2113 break;
2114 __prev_p = __p;
2115 }
2116
2117 return nullptr;
2118 }
2119
// Return the node preceding __n in bucket __bkt.  Precondition: __n is a
// node of bucket __bkt, so the walk from _M_buckets[__bkt] (the bucket's
// before-begin node) is guaranteed to terminate.
2120 template<typename _Key, typename _Value, typename _Alloc,
2121 typename _ExtractKey, typename _Equal,
2122 typename _Hash, typename _RangeHash, typename _Unused,
2123 typename _RehashPolicy, typename _Traits>
2124 auto
2125 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2126 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2127 _M_get_previous_node(size_type __bkt, __node_ptr __n)
2128 -> __node_base_ptr
2129 {
2130 __node_base_ptr __prev_n = _M_buckets[__bkt];
2131 while (__prev_n->_M_nxt != __n)
2132 __prev_n = __prev_n->_M_nxt;
2133 return __prev_n;
2134 }
2135
// Emplace into a unique-keys container.  The node must be constructed
// first (via _Scoped_node, which deallocates on exception) because the
// key only exists inside the constructed value.  Returns {iterator, bool}
// like unordered_map::emplace: bool is false if an equal key existed.
2136 template<typename _Key, typename _Value, typename _Alloc,
2137 typename _ExtractKey, typename _Equal,
2138 typename _Hash, typename _RangeHash, typename _Unused,
2139 typename _RehashPolicy, typename _Traits>
2140 template<typename... _Args>
2141 auto
2142 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2143 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2144 _M_emplace(true_type /* __uks */, _Args&&... __args)
2145 -> pair<iterator, bool>
2146 {
2147 // First build the node to get access to the hash code
2148 _Scoped_node __node { this, std::forward<_Args>(__args)... };
2149 const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2150 const size_type __size = size();
// Small containers: duplicate check by linear scan, no hashing needed.
2151 if (__size <= __small_size_threshold())
2152 {
2153 for (auto __it = _M_begin(); __it; __it = __it->_M_next())
2154 if (this->_M_key_equals(__k, *__it))
2155 // There is already an equivalent node, no insertion
2156 return { iterator(__it), false };
2157 }
2158
2159 __hash_code __code = this->_M_hash_code(__k);
2160 size_type __bkt = _M_bucket_index(__code);
// Large containers: duplicate check via bucket lookup (the small-size
// scan above already covered the other case).
2161 if (__size > __small_size_threshold())
2162 if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
2163 // There is already an equivalent node, no insertion
2164 return { iterator(__p), false };
2165
2166 // Insert the node
2167 auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
// Ownership transferred to the container; disarm the scoped guard.
2168 __node._M_node = nullptr;
2169 return { __pos, true };
2170 }
2171
// Emplace into a multi-keys container, with insertion hint.  Always
// inserts; the hint (and small-size scan inside _M_compute_hash_code)
// may avoid computing the hash of the new key.
2172 template<typename _Key, typename _Value, typename _Alloc,
2173 typename _ExtractKey, typename _Equal,
2174 typename _Hash, typename _RangeHash, typename _Unused,
2175 typename _RehashPolicy, typename _Traits>
2176 template<typename... _Args>
2177 auto
2178 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2179 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2180 _M_emplace(const_iterator __hint, false_type /* __uks */,
2181 _Args&&... __args)
2182 -> iterator
2183 {
2184 // First build the node to get its hash code.
2185 _Scoped_node __node { this, std::forward<_Args>(__args)... };
2186 const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2187
// __res is {equivalent-node-or-hint, hash code of __k}.
2188 auto __res = this->_M_compute_hash_code(__hint._M_cur, __k);
2189 auto __pos
2190 = _M_insert_multi_node(__res.first, __res.second, __node._M_node);
// Ownership transferred to the container; disarm the scoped guard.
2191 __node._M_node = nullptr;
2192 return __pos;
2193 }
2194
// Return {node, hash code} for key __k.  On the small-size path, try to
// find an already-stored equivalent element (starting at __hint, then
// from the beginning) so its cached/recomputed hash can be reused and
// the returned node can serve as the insertion hint; if none matches,
// the hint is dropped and the hash is computed from __k directly.
2195 template<typename _Key, typename _Value, typename _Alloc,
2196 typename _ExtractKey, typename _Equal,
2197 typename _Hash, typename _RangeHash, typename _Unused,
2198 typename _RehashPolicy, typename _Traits>
2199 auto
2200 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2201 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2202 _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const
2203 -> pair<__node_ptr, __hash_code>
2204 {
2205 if (size() <= __small_size_threshold())
2206 {
2207 if (__hint)
2208 {
// Search from the hint to the end of the list first.
2209 for (auto __it = __hint; __it; __it = __it->_M_next())
2210 if (this->_M_key_equals(__k, *__it))
2211 return { __it, this->_M_hash_code(*__it) };
2212 }
2213
// Then search the part of the list before the hint.
2214 for (auto __it = _M_begin(); __it != __hint; __it = __it->_M_next())
2215 if (this->_M_key_equals(__k, *__it))
2216 return { __it, this->_M_hash_code(*__it) };
2217
// No equivalent element: the hint is useless, drop it.
2218 __hint = nullptr;
2219 }
2220
2221 return { __hint, this->_M_hash_code(__k) };
2222 }
2223
// Link an already-allocated node (key known to be absent) into bucket
// __bkt, rehashing first if the policy requires it.  __n_elt is the
// number of elements about to be inserted, for range-insert sizing.
// The rehash guard restores the policy state if _M_rehash throws.
2224 template<typename _Key, typename _Value, typename _Alloc,
2225 typename _ExtractKey, typename _Equal,
2226 typename _Hash, typename _RangeHash, typename _Unused,
2227 typename _RehashPolicy, typename _Traits>
2228 auto
2229 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2230 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2231 _M_insert_unique_node(size_type __bkt, __hash_code __code,
2232 __node_ptr __node, size_type __n_elt)
2233 -> iterator
2234 {
2235 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2236 std::pair<bool, std::size_t> __do_rehash
2237 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2238 __n_elt);
2239
2240 if (__do_rehash.first)
2241 {
2242 _M_rehash(__do_rehash.second, true_type{});
// Bucket count changed, recompute the target bucket.
2243 __bkt = _M_bucket_index(__code);
2244 }
2245
// Past the throwing region: disarm the guard, then cache the hash code
// in the node (no-op when hash codes are not cached).
2246 __rehash_guard._M_guarded_obj = nullptr;
2247 this->_M_store_code(*__node, __code);
2248
2249 // Always insert at the beginning of the bucket.
2250 _M_insert_bucket_begin(__bkt, __node);
2251 ++_M_element_count;
2252 return iterator(__node);
2253 }
2254
// Link an already-allocated node into a multi-keys container, after an
// equivalent element when one exists so that equivalent elements stay
// adjacent.  __hint, if non-null and equivalent, is used as the
// insertion point directly.  May rehash first per the policy.
2255 template<typename _Key, typename _Value, typename _Alloc,
2256 typename _ExtractKey, typename _Equal,
2257 typename _Hash, typename _RangeHash, typename _Unused,
2258 typename _RehashPolicy, typename _Traits>
2259 auto
2260 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2261 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2262 _M_insert_multi_node(__node_ptr __hint,
2263 __hash_code __code, __node_ptr __node)
2264 -> iterator
2265 {
2266 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2267 std::pair<bool, std::size_t> __do_rehash
2268 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2269
2270 if (__do_rehash.first)
2271 _M_rehash(__do_rehash.second, false_type{});
2272
// Past the throwing region: disarm the guard and cache the hash code.
2273 __rehash_guard._M_guarded_obj = nullptr;
2274 this->_M_store_code(*__node, __code);
2275 const key_type& __k = _ExtractKey{}(__node->_M_v());
2276 size_type __bkt = _M_bucket_index(__code);
2277
2278 // Find the node before an equivalent one or use hint if it exists and
2279 // if it is equivalent.
2280 __node_base_ptr __prev
2281 = __builtin_expect(__hint != nullptr, false)
2282 && this->_M_equals(__k, __code, *__hint)
2283 ? __hint
2284 : _M_find_before_node(__bkt, __k, __code);
2285
2286 if (__prev)
2287 {
2288 // Insert after the node before the equivalent one.
2289 __node->_M_nxt = __prev->_M_nxt;
2290 __prev->_M_nxt = __node;
2291 if (__builtin_expect(__prev == __hint, false))
2292 // hint might be the last bucket node, in this case we need to
2293 // update next bucket.
2294 if (__node->_M_nxt
2295 && !this->_M_equals(__k, __code, *__node->_M_next()))
2296 {
2297 size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2298 if (__next_bkt != __bkt)
// The next bucket's before-begin node is now __node.
2299 _M_buckets[__next_bkt] = __node;
2300 }
2301 }
2302 else
2303 // The inserted node has no equivalent in the hashtable. We must
2304 // insert the new node at the beginning of the bucket to preserve
2305 // equivalent elements' relative positions.
2306 _M_insert_bucket_begin(__bkt, __node);
2307 ++_M_element_count;
2308 return iterator(__node);
2309 }
2310
2311 // Insert v if no element with its key is already present.
// Unlike _M_emplace, the key __k is available up front (possibly as a
// transparent type _Kt), so the duplicate check happens *before* the
// node is built — no allocation when the key already exists.
2312 template<typename _Key, typename _Value, typename _Alloc,
2313 typename _ExtractKey, typename _Equal,
2314 typename _Hash, typename _RangeHash, typename _Unused,
2315 typename _RehashPolicy, typename _Traits>
2316 template<typename _Kt, typename _Arg, typename _NodeGenerator>
2317 auto
2318 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2319 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2320 _M_insert_unique(_Kt&& __k, _Arg&& __v,
2321 const _NodeGenerator& __node_gen)
2322 -> pair<iterator, bool>
2323 {
// Small containers: duplicate check by linear scan, no hashing.
2324 const size_type __size = size();
2325 if (__size <= __small_size_threshold())
2326 for (auto __it = _M_begin(); __it; __it = __it->_M_next())
2327 if (this->_M_key_equals_tr(__k, *__it))
2328 return { iterator(__it), false };
2329
2330 __hash_code __code = this->_M_hash_code_tr(__k);
2331 size_type __bkt = _M_bucket_index(__code);
2332
// Large containers: duplicate check via bucket lookup.
2333 if (__size > __small_size_threshold())
2334 if (__node_ptr __node = _M_find_node_tr(__bkt, __k, __code))
2335 return { iterator(__node), false };
2336
// Key is absent: build the node (scoped so it is freed on exception)
// and link it in.
2337 _Scoped_node __node {
2338 __node_builder_t::_S_build(std::forward<_Kt>(__k),
2339 std::forward<_Arg>(__v),
2340 __node_gen),
2341 this
2342 };
2343 auto __pos
2344 = _M_insert_unique_node(__bkt, __code, __node._M_node);
2345 __node._M_node = nullptr;
2346 return { __pos, true };
2347 }
2348
2349 // Insert v unconditionally.
// Multi-keys insert with hint: allocate the node first, then compute the
// hash, so that neither step can leak on exception (the scoped node
// frees itself; the hash is computed before any structural change).
2350 template<typename _Key, typename _Value, typename _Alloc,
2351 typename _ExtractKey, typename _Equal,
2352 typename _Hash, typename _RangeHash, typename _Unused,
2353 typename _RehashPolicy, typename _Traits>
2354 template<typename _Arg, typename _NodeGenerator>
2355 auto
2356 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2357 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2358 _M_insert(const_iterator __hint, _Arg&& __v,
2359 const _NodeGenerator& __node_gen,
2360 false_type /* __uks */)
2361 -> iterator
2362 {
2363 // First allocate new node so that we don't do anything if it throws.
2364 _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
2365
2366 // Second compute the hash code so that we don't rehash if it throws.
2367 auto __res = this->_M_compute_hash_code(
2368 __hint._M_cur, _ExtractKey{}(__node._M_node->_M_v()));
2369
2370 auto __pos
2371 = _M_insert_multi_node(__res.first, __res.second, __node._M_node);
// Ownership transferred to the container; disarm the scoped guard.
2372 __node._M_node = nullptr;
2373 return __pos;
2374 }
2375
// Erase the element at __it; returns the iterator following it.
2376 template<typename _Key, typename _Value, typename _Alloc,
2377 typename _ExtractKey, typename _Equal,
2378 typename _Hash, typename _RangeHash, typename _Unused,
2379 typename _RehashPolicy, typename _Traits>
2380 auto
2381 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2382 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2383 erase(const_iterator __it)
2384 -> iterator
2385 {
2386 __node_ptr __n = __it._M_cur;
2387 std::size_t __bkt = _M_bucket_index(*__n);
2388
2389 // Look for previous node to unlink it from the erased one, this
2390 // is why we need buckets to contain the before begin to make
2391 // this search fast.
2392 __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2393 return _M_erase(__bkt, __prev_n, __n);
2394 }
2395
// Unlink and deallocate node __n of bucket __bkt, given its predecessor
// __prev_n.  Keeps the bucket array consistent: both __n's own bucket
// (if __n was its first element) and the *next* bucket (whose
// before-begin node may have been __n) are fixed up.
2396 template<typename _Key, typename _Value, typename _Alloc,
2397 typename _ExtractKey, typename _Equal,
2398 typename _Hash, typename _RangeHash, typename _Unused,
2399 typename _RehashPolicy, typename _Traits>
2400 auto
2401 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2402 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2403 _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
2404 -> iterator
2405 {
// __n was the first node of its bucket: update/clear the bucket entry
// (the third argument is only read when __n has a successor).
2406 if (__prev_n == _M_buckets[__bkt])
2407 _M_remove_bucket_begin(__bkt, __n->_M_next(),
2408 __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
2409 else if (__n->_M_nxt)
2410 {
// __n was the last node of its bucket: the next bucket's before-begin
// node becomes __prev_n.
2411 size_type __next_bkt = _M_bucket_index(*__n->_M_next());
2412 if (__next_bkt != __bkt)
2413 _M_buckets[__next_bkt] = __prev_n;
2414 }
2415
2416 __prev_n->_M_nxt = __n->_M_nxt;
2417 iterator __result(__n->_M_next());
2418 this->_M_deallocate_node(__n);
2419 --_M_element_count;
2420
2421 return __result;
2422 }
2423
// Erase by key in a unique-keys container.  Returns the number of
// elements removed (0 or 1).
2424 template<typename _Key, typename _Value, typename _Alloc,
2425 typename _ExtractKey, typename _Equal,
2426 typename _Hash, typename _RangeHash, typename _Unused,
2427 typename _RehashPolicy, typename _Traits>
2428 auto
2429 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2430 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2431 _M_erase(true_type /* __uks */, const key_type& __k)
2432 -> size_type
2433 {
2434 __node_base_ptr __prev_n;
2435 __node_ptr __n;
2436 std::size_t __bkt;
// Small containers: find the predecessor by scanning the whole list
// (no hashing); the bucket index is then derived from the found node.
2437 if (size() <= __small_size_threshold())
2438 {
2439 __prev_n = _M_find_before_node(__k);
2440 if (!__prev_n)
2441 return 0;
2442
2443 // We found a matching node, erase it.
2444 __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2445 __bkt = _M_bucket_index(*__n);
2446 }
2447 else
2448 {
2449 __hash_code __code = this->_M_hash_code(__k);
2450 __bkt = _M_bucket_index(__code);
2451
2452 // Look for the node before the first matching node.
2453 __prev_n = _M_find_before_node(__bkt, __k, __code);
2454 if (!__prev_n)
2455 return 0;
2456
2457 // We found a matching node, erase it.
2458 __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2459 }
2460
2461 _M_erase(__bkt, __prev_n, __n);
2462 return 1;
2463 }
2464
// Erase by key in a multi-keys container.  Removes the whole run of
// adjacent equivalent elements; returns the number removed.
2465 template<typename _Key, typename _Value, typename _Alloc,
2466 typename _ExtractKey, typename _Equal,
2467 typename _Hash, typename _RangeHash, typename _Unused,
2468 typename _RehashPolicy, typename _Traits>
2469 auto
2470 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2471 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2472 _M_erase(false_type /* __uks */, const key_type& __k)
2473 -> size_type
2474 {
2475 std::size_t __bkt;
2476 __node_base_ptr __prev_n;
2477 __node_ptr __n;
// Locate the predecessor of the first matching node, via linear scan
// for small containers or bucket lookup otherwise.
2478 if (size() <= __small_size_threshold())
2479 {
2480 __prev_n = _M_find_before_node(__k);
2481 if (!__prev_n)
2482 return 0;
2483
2484 // We found a matching node, erase it.
2485 __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2486 __bkt = _M_bucket_index(*__n);
2487 }
2488 else
2489 {
2490 __hash_code __code = this->_M_hash_code(__k);
2491 __bkt = _M_bucket_index(__code);
2492
2493 // Look for the node before the first matching node.
2494 __prev_n = _M_find_before_node(__bkt, __k, __code);
2495 if (!__prev_n)
2496 return 0;
2497
2498 __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2499 }
2500
2501 // _GLIBCXX_RESOLVE_LIB_DEFECTS
2502 // 526. Is it undefined if a function in the standard changes
2503 // in parameters?
2504 // We use one loop to find all matching nodes and another to deallocate
2505 // them so that the key stays valid during the first loop. It might be
2506 // invalidated indirectly when destroying nodes.
2507 __node_ptr __n_last = __n->_M_next();
2508 while (__n_last && this->_M_node_equals(*__n, *__n_last))
2509 __n_last = __n_last->_M_next();
2510
// Bucket of the first node after the erased run (or __bkt if none),
// needed below to repair the bucket array.
2511 std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
2512
2513 // Deallocate nodes.
2514 size_type __result = 0;
2515 do
2516 {
2517 __node_ptr __p = __n->_M_next();
2518 this->_M_deallocate_node(__n);
2519 __n = __p;
2520 ++__result;
2521 }
2522 while (__n != __n_last);
2523
// Relink: fix this bucket's begin (or clear it), update the next
// bucket's before-begin node if it pointed into the erased run, then
// splice the list around the removed nodes.
2524 _M_element_count -= __result;
2525 if (__prev_n == _M_buckets[__bkt])
2526 _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
2527 else if (__n_last_bkt != __bkt)
2528 _M_buckets[__n_last_bkt] = __prev_n;
2529 __prev_n->_M_nxt = __n_last;
2530 return __result;
2531 }
2532
// Erase the range [__first, __last); returns an iterator to __last's
// element.  Deallocates bucket by bucket, repairing each bucket's entry
// in the bucket array as its nodes are removed.
2533 template<typename _Key, typename _Value, typename _Alloc,
2534 typename _ExtractKey, typename _Equal,
2535 typename _Hash, typename _RangeHash, typename _Unused,
2536 typename _RehashPolicy, typename _Traits>
2537 auto
2538 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2539 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2540 erase(const_iterator __first, const_iterator __last)
2541 -> iterator
2542 {
2543 __node_ptr __n = __first._M_cur;
2544 __node_ptr __last_n = __last._M_cur;
2545 if (__n == __last_n)
2546 return iterator(__n);
2547
2548 std::size_t __bkt = _M_bucket_index(*__n);
2549
2550 __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2551 bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
2552 std::size_t __n_bkt = __bkt;
2553 for (;;)
2554 {
// Inner loop: free the nodes of the current bucket up to __last_n;
// __n_bkt tracks the bucket of the next surviving node.
2555 do
2556 {
2557 __node_ptr __tmp = __n;
2558 __n = __n->_M_next();
2559 this->_M_deallocate_node(__tmp);
2560 --_M_element_count;
2561 if (!__n)
2562 break;
2563 __n_bkt = _M_bucket_index(*__n);
2564 }
2565 while (__n != __last_n && __n_bkt == __bkt);
// If the whole beginning of the bucket was erased, update/clear its
// entry in the bucket array.
2566 if (__is_bucket_begin)
2567 _M_remove_bucket_begin(__bkt, __n, __n_bkt);
2568 if (__n == __last_n)
2569 break;
// Every following bucket is erased from its first node onward.
2570 __is_bucket_begin = true;
2571 __bkt = __n_bkt;
2572 }
2573
// The surviving node __n starts a (new or partially-erased) bucket:
// its before-begin node is now __prev_n.
2574 if (__n && (__n_bkt != __bkt || __is_bucket_begin))
2575 _M_buckets[__n_bkt] = __prev_n;
2576 __prev_n->_M_nxt = __n;
2577 return iterator(__n);
2578 }
2579
// Remove all elements.  Deallocates every node, zeroes the bucket array
// (kept at its current size) and resets the before-begin sentinel.
2580 template<typename _Key, typename _Value, typename _Alloc,
2581 typename _ExtractKey, typename _Equal,
2582 typename _Hash, typename _RangeHash, typename _Unused,
2583 typename _RehashPolicy, typename _Traits>
2584 void
2585 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2586 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2587 clear() noexcept
2588 {
2589 this->_M_deallocate_nodes(_M_begin());
2590 __builtin_memset(_M_buckets, 0,
2591 _M_bucket_count * sizeof(__node_base_ptr));
2592 _M_element_count = 0;
2593 _M_before_begin._M_nxt = nullptr;
2594 }
2595
// Public rehash: set the bucket count to at least __bkt_count, but never
// below what the policy requires for the current element count, rounded
// to the policy's next admissible bucket count.  The guard restores the
// policy state if _M_rehash throws.
2596 template<typename _Key, typename _Value, typename _Alloc,
2597 typename _ExtractKey, typename _Equal,
2598 typename _Hash, typename _RangeHash, typename _Unused,
2599 typename _RehashPolicy, typename _Traits>
2600 void
2601 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2602 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2603 rehash(size_type __bkt_count)
2604 {
2605 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2606 __bkt_count
2607 = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2608 __bkt_count);
2609 __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2610
2611 if (__bkt_count != _M_bucket_count)
2612 {
2613 _M_rehash(__bkt_count, __unique_keys{});
2614 __rehash_guard._M_guarded_obj = nullptr;
2615 }
2616 }
2617
2618 // Rehash when there is no equivalent elements.
// Unique keys mean no adjacency constraint: each node can simply be
// pushed at the front of its new bucket's chain.  The bucket array
// entries hold the node *before* the bucket's first element, so pushing
// at the global front also requires repointing the previous front
// bucket's entry at the newly-linked node.
2619 template<typename _Key, typename _Value, typename _Alloc,
2620 typename _ExtractKey, typename _Equal,
2621 typename _Hash, typename _RangeHash, typename _Unused,
2622 typename _RehashPolicy, typename _Traits>
2623 void
2624 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2625 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2626 _M_rehash(size_type __bkt_count, true_type /* __uks */)
2627 {
2628 __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2629 __node_ptr __p = _M_begin();
2630 _M_before_begin._M_nxt = nullptr;
// __bbegin_bkt is the bucket whose first node is currently the global
// list head (i.e. whose entry is &_M_before_begin).
2631 std::size_t __bbegin_bkt = 0;
2632 while (__p)
2633 {
2634 __node_ptr __next = __p->_M_next();
2635 std::size_t __bkt
2636 = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2637 if (!__new_buckets[__bkt])
2638 {
// First node of this bucket: push at the front of the whole list and
// make this bucket the one anchored at _M_before_begin; the bucket
// that previously held the list head now starts after __p.
2639 __p->_M_nxt = _M_before_begin._M_nxt;
2640 _M_before_begin._M_nxt = __p;
2641 __new_buckets[__bkt] = &_M_before_begin;
2642 if (__p->_M_nxt)
2643 __new_buckets[__bbegin_bkt] = __p;
2644 __bbegin_bkt = __bkt;
2645 }
2646 else
2647 {
// Bucket already populated: insert right after its before-begin node.
2648 __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2649 __new_buckets[__bkt]->_M_nxt = __p;
2650 }
2651
2652 __p = __next;
2653 }
2654
2655 _M_deallocate_buckets();
2656 _M_bucket_count = __bkt_count;
2657 _M_buckets = __new_buckets;
2658 }
2659
2660 // Rehash when there can be equivalent elements, preserve their relative
2661 // order.
// Same front-insertion scheme as the unique-keys rehash, except that a
// node landing in the same bucket as its predecessor is chained directly
// after it, keeping runs of equivalent elements in their original order.
// That in-middle insertion can silently change which node is a bucket's
// last one, so a deferred fix-up of the next bucket's before-begin entry
// (__check_bucket) is performed when leaving such a run.
2662 template<typename _Key, typename _Value, typename _Alloc,
2663 typename _ExtractKey, typename _Equal,
2664 typename _Hash, typename _RangeHash, typename _Unused,
2665 typename _RehashPolicy, typename _Traits>
2666 void
2667 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2668 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2669 _M_rehash(size_type __bkt_count, false_type /* __uks */)
2670 {
2671 __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2672 __node_ptr __p = _M_begin();
2673 _M_before_begin._M_nxt = nullptr;
2674 std::size_t __bbegin_bkt = 0;
2675 std::size_t __prev_bkt = 0;
2676 __node_ptr __prev_p = nullptr;
2677 bool __check_bucket = false;
2678
2679 while (__p)
2680 {
2681 __node_ptr __next = __p->_M_next();
2682 std::size_t __bkt
2683 = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2684
2685 if (__prev_p && __prev_bkt == __bkt)
2686 {
2687 // Previous insert was already in this bucket, we insert after
2688 // the previously inserted one to preserve equivalent elements
2689 // relative order.
2690 __p->_M_nxt = __prev_p->_M_nxt;
2691 __prev_p->_M_nxt = __p;
2692
2693 // Inserting after a node in a bucket require to check that we
2694 // haven't change the bucket last node, in this case next
2695 // bucket containing its before begin node must be updated. We
2696 // schedule a check as soon as we move out of the sequence of
2697 // equivalent nodes to limit the number of checks.
2698 __check_bucket = true;
2699 }
2700 else
2701 {
2702 if (__check_bucket)
2703 {
2704 // Check if we shall update the next bucket because of
2705 // insertions into __prev_bkt bucket.
2706 if (__prev_p->_M_nxt)
2707 {
2708 std::size_t __next_bkt
2709 = __hash_code_base::_M_bucket_index(
2710 *__prev_p->_M_next(), __bkt_count);
2711 if (__next_bkt != __prev_bkt)
2712 __new_buckets[__next_bkt] = __prev_p;
2713 }
2714 __check_bucket = false;
2715 }
2716
2717 if (!__new_buckets[__bkt])
2718 {
// First node of this bucket: push at the global list front and anchor
// the bucket at _M_before_begin (see unique-keys rehash).
2719 __p->_M_nxt = _M_before_begin._M_nxt;
2720 _M_before_begin._M_nxt = __p;
2721 __new_buckets[__bkt] = &_M_before_begin;
2722 if (__p->_M_nxt)
2723 __new_buckets[__bbegin_bkt] = __p;
2724 __bbegin_bkt = __bkt;
2725 }
2726 else
2727 {
2728 __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2729 __new_buckets[__bkt]->_M_nxt = __p;
2730 }
2731 }
2732 __prev_p = __p;
2733 __prev_bkt = __bkt;
2734 __p = __next;
2735 }
2736
// A check may still be pending when the loop ends on an in-bucket
// insertion; resolve it now.
2737 if (__check_bucket && __prev_p->_M_nxt)
2738 {
2739 std::size_t __next_bkt
2740 = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
2741 __bkt_count);
2742 if (__next_bkt != __prev_bkt)
2743 __new_buckets[__next_bkt] = __prev_p;
2744 }
2745
2746 _M_deallocate_buckets();
2747 _M_bucket_count = __bkt_count;
2748 _M_buckets = __new_buckets;
2749 }
2750
2751 #if __cplusplus > 201402L
// Primary template, specialized elsewhere to grant merge() access
// between compatible unordered container types (C++17 node extraction).
2752 template<typename, typename, typename> class _Hash_merge_helper { };
2753 #endif // C++17
2754
2755 #if __cpp_deduction_guides >= 201606
2756 // Used to constrain deduction guides
// Rejects types that could be an allocator or an integral (bucket-count)
// argument when deducing the hash functor in container deduction guides.
2757 template<typename _Hash>
2758 using _RequireNotAllocatorOrIntegral
2759 = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2760 #endif
2761
2762 /// @endcond
2763 _GLIBCXX_END_NAMESPACE_VERSION
2764 } // namespace std
2765
2766 #endif // _HASHTABLE_H