1 // hashtable.h header -*- C++ -*-
3 // Copyright (C) 2007-2021 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 /** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
31 #define _HASHTABLE_H 1
33 #pragma GCC system_header
35 #include <bits/hashtable_policy.h>
36 #if __cplusplus > 201402L
37 # include <bits/node_handle.h>
40 namespace std
_GLIBCXX_VISIBILITY(default)
42 _GLIBCXX_BEGIN_NAMESPACE_VERSION
44 template<typename _Tp
, typename _Hash
>
46 = __not_
<__and_
<// Do not cache for fast hasher.
47 __is_fast_hash
<_Hash
>,
48 // Mandatory to have erase not throwing.
49 __is_nothrow_invocable
<const _Hash
&, const _Tp
&>>>;
52 * Primary class template _Hashtable.
54 * @ingroup hashtable-detail
56 * @tparam _Value CopyConstructible type.
58 * @tparam _Key CopyConstructible type.
60 * @tparam _Alloc An allocator type
61 * ([lib.allocator.requirements]) whose _Alloc::value_type is
62 * _Value. As a conforming extension, we allow for
63 * _Alloc::value_type != _Value.
65 * @tparam _ExtractKey Function object that takes an object of type
66 * _Value and returns a value of type _Key.
68 * @tparam _Equal Function object that takes two objects of type _Key
69 * and returns a bool-like value that is true if the two objects
70 * are considered equal.
72 * @tparam _Hash The hash function. A unary function object with
73 * argument type _Key and result type size_t. Return values should
74 * be distributed over the entire range [0, numeric_limits<size_t>::max()].
76 * @tparam _RangeHash The range-hashing function (in the terminology of
77 * Tavori and Dreizin). A binary function object whose argument
78 * types and result type are all size_t. Given arguments r and N,
79 * the return value is in the range [0, N).
81 * @tparam _Unused Not used.
83 * @tparam _RehashPolicy Policy class with three members, all of
84 * which govern the bucket count. _M_next_bkt(n) returns a bucket
85 * count no smaller than n. _M_bkt_for_elements(n) returns a
86 * bucket count appropriate for an element count of n.
87 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
88 * current bucket count is n_bkt and the current element count is
89 * n_elt, we need to increase the bucket count for n_ins insertions.
90 * If so, returns make_pair(true, n), where n is the new bucket count. If
91 * not, returns make_pair(false, <anything>)
93 * @tparam _Traits Compile-time class with three boolean
94 * std::integral_constant members: __cache_hash_code, __constant_iterators,
97 * Each _Hashtable data structure has:
99 * - _Bucket[] _M_buckets
100 * - _Hash_node_base _M_before_begin
101 * - size_type _M_bucket_count
102 * - size_type _M_element_count
104 * with _Bucket being _Hash_node_base* and _Hash_node containing:
106 * - _Hash_node* _M_next
108 * - size_t _M_hash_code if cache_hash_code is true
110 * In terms of Standard containers the hashtable is like the aggregation of:
112 * - std::forward_list<_Node> containing the elements
113 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
115 * The non-empty buckets contain the node before the first node in the
116 * bucket. This design makes it possible to implement something like a
117 * std::forward_list::insert_after on container insertion and
118 * std::forward_list::erase_after on container erase
119 * calls. _M_before_begin is equivalent to
120 * std::forward_list::before_begin. Empty buckets contain
121 * nullptr. Note that one of the non-empty buckets contains
122 * &_M_before_begin which is not a dereferenceable node so the
123 * node pointer in a bucket shall never be dereferenced, only its
126 * Walking through a bucket's nodes requires a check on the hash code to
127 * see if each node is still in the bucket. Such a design assumes a
128 * quite efficient hash functor and is one of the reasons it is
129 * highly advisable to set __cache_hash_code to true.
131 * The container iterators are simply built from nodes. This way
132 * incrementing the iterator is perfectly efficient independent of
133 * how many empty buckets there are in the container.
135 * On insert we compute the element's hash code and use it to find the
136 * bucket index. If the element must be inserted in an empty bucket
137 * we add it at the beginning of the singly linked list and make the
138 * bucket point to _M_before_begin. The bucket that used to point to
139 * _M_before_begin, if any, is updated to point to its new before
142 * On erase, the simple iterator design requires using the hash
143 * functor to get the index of the bucket to update. For this
144 * reason, when __cache_hash_code is set to false the hash functor must
145 * not throw and this is enforced by a static assertion.
147 * Functionality is implemented by decomposition into base classes,
148 * where the derived _Hashtable class is used in _Map_base,
149 * _Insert, _Rehash_base, and _Equality base classes to access the
150 * "this" pointer. _Hashtable_base is used in the base classes as a
151 * non-recursive, fully-completed-type so that detailed nested type
152 * information, such as iterator type and node type, can be
153 * used. This is similar to the "Curiously Recurring Template
154 * Pattern" (CRTP) technique, but uses a reconstructed, not
155 * explicitly passed, template pattern.
157 * Base class templates are:
158 * - __detail::_Hashtable_base
159 * - __detail::_Map_base
160 * - __detail::_Insert
161 * - __detail::_Rehash_base
162 * - __detail::_Equality
164 template<typename _Key
, typename _Value
, typename _Alloc
,
165 typename _ExtractKey
, typename _Equal
,
166 typename _Hash
, typename _RangeHash
, typename _Unused
,
167 typename _RehashPolicy
, typename _Traits
>
169 : public __detail::_Hashtable_base
<_Key
, _Value
, _ExtractKey
, _Equal
,
170 _Hash
, _RangeHash
, _Unused
, _Traits
>,
171 public __detail::_Map_base
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
172 _Hash
, _RangeHash
, _Unused
,
173 _RehashPolicy
, _Traits
>,
174 public __detail::_Insert
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
175 _Hash
, _RangeHash
, _Unused
,
176 _RehashPolicy
, _Traits
>,
177 public __detail::_Rehash_base
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
178 _Hash
, _RangeHash
, _Unused
,
179 _RehashPolicy
, _Traits
>,
180 public __detail::_Equality
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
181 _Hash
, _RangeHash
, _Unused
,
182 _RehashPolicy
, _Traits
>,
183 private __detail::_Hashtable_alloc
<
184 __alloc_rebind
<_Alloc
,
185 __detail::_Hash_node
<_Value
,
186 _Traits::__hash_cached::value
>>>
188 static_assert(is_same
<typename remove_cv
<_Value
>::type
, _Value
>::value
,
189 "unordered container must have a non-const, non-volatile value_type");
190 #if __cplusplus > 201703L || defined __STRICT_ANSI__
191 static_assert(is_same
<typename
_Alloc::value_type
, _Value
>{},
192 "unordered container must have the same value_type as its allocator");
195 using __traits_type
= _Traits
;
196 using __hash_cached
= typename
__traits_type::__hash_cached
;
197 using __constant_iterators
= typename
__traits_type::__constant_iterators
;
198 using __node_type
= __detail::_Hash_node
<_Value
, __hash_cached::value
>;
199 using __node_alloc_type
= __alloc_rebind
<_Alloc
, __node_type
>;
201 using __hashtable_alloc
= __detail::_Hashtable_alloc
<__node_alloc_type
>;
203 using __node_value_type
=
204 __detail::_Hash_node_value
<_Value
, __hash_cached::value
>;
205 using __node_ptr
= typename
__hashtable_alloc::__node_ptr
;
206 using __value_alloc_traits
=
207 typename
__hashtable_alloc::__value_alloc_traits
;
208 using __node_alloc_traits
=
209 typename
__hashtable_alloc::__node_alloc_traits
;
210 using __node_base
= typename
__hashtable_alloc::__node_base
;
211 using __node_base_ptr
= typename
__hashtable_alloc::__node_base_ptr
;
212 using __buckets_ptr
= typename
__hashtable_alloc::__buckets_ptr
;
214 using __insert_base
= __detail::_Insert
<_Key
, _Value
, _Alloc
, _ExtractKey
,
217 _RehashPolicy
, _Traits
>;
220 typedef _Key key_type
;
221 typedef _Value value_type
;
222 typedef _Alloc allocator_type
;
223 typedef _Equal key_equal
;
225 // mapped_type, if present, comes from _Map_base.
226 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
227 typedef typename
__value_alloc_traits::pointer pointer
;
228 typedef typename
__value_alloc_traits::const_pointer const_pointer
;
229 typedef value_type
& reference
;
230 typedef const value_type
& const_reference
;
232 using iterator
= typename
__insert_base::iterator
;
234 using const_iterator
= typename
__insert_base::const_iterator
;
236 using local_iterator
= __detail::_Local_iterator
<key_type
, _Value
,
237 _ExtractKey
, _Hash
, _RangeHash
, _Unused
,
238 __constant_iterators::value
,
239 __hash_cached::value
>;
241 using const_local_iterator
= __detail::_Local_const_iterator
<
243 _ExtractKey
, _Hash
, _RangeHash
, _Unused
,
244 __constant_iterators::value
, __hash_cached::value
>;
247 using __rehash_type
= _RehashPolicy
;
248 using __rehash_state
= typename
__rehash_type::_State
;
250 using __unique_keys
= typename
__traits_type::__unique_keys
;
252 using __hashtable_base
= __detail::
253 _Hashtable_base
<_Key
, _Value
, _ExtractKey
,
254 _Equal
, _Hash
, _RangeHash
, _Unused
, _Traits
>;
256 using __hash_code_base
= typename
__hashtable_base::__hash_code_base
;
257 using __hash_code
= typename
__hashtable_base::__hash_code
;
258 using __ireturn_type
= typename
__insert_base::__ireturn_type
;
260 using __map_base
= __detail::_Map_base
<_Key
, _Value
, _Alloc
, _ExtractKey
,
261 _Equal
, _Hash
, _RangeHash
, _Unused
,
262 _RehashPolicy
, _Traits
>;
264 using __rehash_base
= __detail::_Rehash_base
<_Key
, _Value
, _Alloc
,
266 _Hash
, _RangeHash
, _Unused
,
267 _RehashPolicy
, _Traits
>;
269 using __eq_base
= __detail::_Equality
<_Key
, _Value
, _Alloc
, _ExtractKey
,
270 _Equal
, _Hash
, _RangeHash
, _Unused
,
271 _RehashPolicy
, _Traits
>;
273 using __reuse_or_alloc_node_gen_t
=
274 __detail::_ReuseOrAllocNode
<__node_alloc_type
>;
275 using __alloc_node_gen_t
=
276 __detail::_AllocNode
<__node_alloc_type
>;
278 // Simple RAII type for managing a node containing an element
281 // Take ownership of a node with a constructed element.
282 _Scoped_node(__node_ptr __n
, __hashtable_alloc
* __h
)
283 : _M_h(__h
), _M_node(__n
) { }
285 // Allocate a node and construct an element within it.
286 template<typename
... _Args
>
287 _Scoped_node(__hashtable_alloc
* __h
, _Args
&&... __args
)
289 _M_node(__h
->_M_allocate_node(std::forward
<_Args
>(__args
)...))
292 // Destroy element and deallocate node.
293 ~_Scoped_node() { if (_M_node
) _M_h
->_M_deallocate_node(_M_node
); };
295 _Scoped_node(const _Scoped_node
&) = delete;
296 _Scoped_node
& operator=(const _Scoped_node
&) = delete;
298 __hashtable_alloc
* _M_h
;
302 template<typename _Ht
>
304 typename conditional
<std::is_lvalue_reference
<_Ht
>::value
,
305 const value_type
&, value_type
&&>::type
306 __fwd_value_for(value_type
& __val
) noexcept
307 { return std::move(__val
); }
309 // Compile-time diagnostics.
311 // _Hash_code_base has everything protected, so use this derived type to
313 struct __hash_code_base_access
: __hash_code_base
314 { using __hash_code_base::_M_bucket_index
; };
316 // Getting a bucket index from a node shall not throw because it is used
317 // in methods (erase, swap...) that shall not throw.
318 static_assert(noexcept(declval
<const __hash_code_base_access
&>()
319 ._M_bucket_index(declval
<const __node_value_type
&>(),
321 "Cache the hash code or qualify your functors involved"
322 " in hash code and bucket index computation with noexcept");
324 // To get bucket index we need _RangeHash not to throw.
325 static_assert(is_nothrow_default_constructible
<_RangeHash
>::value
,
326 "Functor used to map hash code to bucket index"
327 " must be nothrow default constructible");
328 static_assert(noexcept(
329 std::declval
<const _RangeHash
&>()((std::size_t)0, (std::size_t)0)),
330 "Functor used to map hash code to bucket index must be"
333 // To compute the bucket index we also need _ExtractKey not to throw.
334 static_assert(is_nothrow_default_constructible
<_ExtractKey
>::value
,
335 "_ExtractKey must be nothrow default constructible");
336 static_assert(noexcept(
337 std::declval
<const _ExtractKey
&>()(std::declval
<_Value
>())),
338 "_ExtractKey functor must be noexcept invocable");
340 template<typename _Keya
, typename _Valuea
, typename _Alloca
,
341 typename _ExtractKeya
, typename _Equala
,
342 typename _Hasha
, typename _RangeHasha
, typename _Unuseda
,
343 typename _RehashPolicya
, typename _Traitsa
,
345 friend struct __detail::_Map_base
;
347 template<typename _Keya
, typename _Valuea
, typename _Alloca
,
348 typename _ExtractKeya
, typename _Equala
,
349 typename _Hasha
, typename _RangeHasha
, typename _Unuseda
,
350 typename _RehashPolicya
, typename _Traitsa
>
351 friend struct __detail::_Insert_base
;
353 template<typename _Keya
, typename _Valuea
, typename _Alloca
,
354 typename _ExtractKeya
, typename _Equala
,
355 typename _Hasha
, typename _RangeHasha
, typename _Unuseda
,
356 typename _RehashPolicya
, typename _Traitsa
,
357 bool _Constant_iteratorsa
>
358 friend struct __detail::_Insert
;
360 template<typename _Keya
, typename _Valuea
, typename _Alloca
,
361 typename _ExtractKeya
, typename _Equala
,
362 typename _Hasha
, typename _RangeHasha
, typename _Unuseda
,
363 typename _RehashPolicya
, typename _Traitsa
,
365 friend struct __detail::_Equality
;
368 using size_type
= typename
__hashtable_base::size_type
;
369 using difference_type
= typename
__hashtable_base::difference_type
;
371 #if __cplusplus > 201402L
372 using node_type
= _Node_handle
<_Key
, _Value
, __node_alloc_type
>;
373 using insert_return_type
= _Node_insert_return
<iterator
, node_type
>;
377 __buckets_ptr _M_buckets
= &_M_single_bucket
;
378 size_type _M_bucket_count
= 1;
379 __node_base _M_before_begin
;
380 size_type _M_element_count
= 0;
381 _RehashPolicy _M_rehash_policy
;
383 // A single bucket used when only one bucket is needed. Especially
384 // useful for move semantics: it lets us leave the hashtable with a single
385 // bucket that is not heap-allocated, so that those operations can be noexcept
387 // Note that we can't leave hashtable with 0 bucket without adding
388 // numerous checks in the code to avoid 0 modulus.
389 __node_base_ptr _M_single_bucket
= nullptr;
395 _M_buckets
[_M_bucket_index(*_M_begin())] = &_M_before_begin
;
399 _M_update_bbegin(__node_ptr __n
)
401 _M_before_begin
._M_nxt
= __n
;
406 _M_uses_single_bucket(__buckets_ptr __bkts
) const
407 { return __builtin_expect(__bkts
== &_M_single_bucket
, false); }
410 _M_uses_single_bucket() const
411 { return _M_uses_single_bucket(_M_buckets
); }
414 _M_base_alloc() { return *this; }
417 _M_allocate_buckets(size_type __bkt_count
)
419 if (__builtin_expect(__bkt_count
== 1, false))
421 _M_single_bucket
= nullptr;
422 return &_M_single_bucket
;
425 return __hashtable_alloc::_M_allocate_buckets(__bkt_count
);
429 _M_deallocate_buckets(__buckets_ptr __bkts
, size_type __bkt_count
)
431 if (_M_uses_single_bucket(__bkts
))
434 __hashtable_alloc::_M_deallocate_buckets(__bkts
, __bkt_count
);
438 _M_deallocate_buckets()
439 { _M_deallocate_buckets(_M_buckets
, _M_bucket_count
); }
441 // Gets bucket begin, deals with the fact that non-empty buckets contain
442 // their before begin node.
444 _M_bucket_begin(size_type __bkt
) const;
448 { return static_cast<__node_ptr
>(_M_before_begin
._M_nxt
); }
450 // Assign *this using another _Hashtable instance. Whether elements
451 // are copied or moved depends on the _Ht reference.
452 template<typename _Ht
>
454 _M_assign_elements(_Ht
&&);
456 template<typename _Ht
, typename _NodeGenerator
>
458 _M_assign(_Ht
&&, const _NodeGenerator
&);
461 _M_move_assign(_Hashtable
&&, true_type
);
464 _M_move_assign(_Hashtable
&&, false_type
);
469 _Hashtable(const _Hash
& __h
, const _Equal
& __eq
,
470 const allocator_type
& __a
)
471 : __hashtable_base(__h
, __eq
),
472 __hashtable_alloc(__node_alloc_type(__a
))
475 _Hashtable(_Hashtable
&& __ht
, __node_alloc_type
&& __a
,
476 true_type
/* alloc always equal */)
477 noexcept(std::is_nothrow_copy_constructible
<_Hash
>::value
&&
478 std::is_nothrow_copy_constructible
<_Equal
>::value
);
480 _Hashtable(_Hashtable
&&, __node_alloc_type
&&,
481 false_type
/* alloc always equal */);
483 template<typename _InputIterator
>
484 _Hashtable(_InputIterator __first
, _InputIterator __last
,
485 size_type __bkt_count_hint
,
486 const _Hash
&, const _Equal
&, const allocator_type
&,
489 template<typename _InputIterator
>
490 _Hashtable(_InputIterator __first
, _InputIterator __last
,
491 size_type __bkt_count_hint
,
492 const _Hash
&, const _Equal
&, const allocator_type
&,
496 // Constructor, destructor, assignment, swap
497 _Hashtable() = default;
499 _Hashtable(const _Hashtable
&);
501 _Hashtable(const _Hashtable
&, const allocator_type
&);
504 _Hashtable(size_type __bkt_count_hint
,
505 const _Hash
& __hf
= _Hash(),
506 const key_equal
& __eql
= key_equal(),
507 const allocator_type
& __a
= allocator_type());
509 // Use delegating constructors.
510 _Hashtable(_Hashtable
&& __ht
)
512 _Hashtable(std::declval
<_Hashtable
>(),
513 std::declval
<__node_alloc_type
>(),
515 : _Hashtable(std::move(__ht
), std::move(__ht
._M_node_allocator()),
519 _Hashtable(_Hashtable
&& __ht
, const allocator_type
& __a
)
521 _Hashtable(std::declval
<_Hashtable
>(),
522 std::declval
<__node_alloc_type
>(),
523 typename
__node_alloc_traits::is_always_equal
{})) )
524 : _Hashtable(std::move(__ht
), __node_alloc_type(__a
),
525 typename
__node_alloc_traits::is_always_equal
{})
529 _Hashtable(const allocator_type
& __a
)
530 : __hashtable_alloc(__node_alloc_type(__a
))
533 template<typename _InputIterator
>
534 _Hashtable(_InputIterator __f
, _InputIterator __l
,
535 size_type __bkt_count_hint
= 0,
536 const _Hash
& __hf
= _Hash(),
537 const key_equal
& __eql
= key_equal(),
538 const allocator_type
& __a
= allocator_type())
539 : _Hashtable(__f
, __l
, __bkt_count_hint
, __hf
, __eql
, __a
,
543 _Hashtable(initializer_list
<value_type
> __l
,
544 size_type __bkt_count_hint
= 0,
545 const _Hash
& __hf
= _Hash(),
546 const key_equal
& __eql
= key_equal(),
547 const allocator_type
& __a
= allocator_type())
548 : _Hashtable(__l
.begin(), __l
.end(), __bkt_count_hint
,
549 __hf
, __eql
, __a
, __unique_keys
{})
553 operator=(const _Hashtable
& __ht
);
556 operator=(_Hashtable
&& __ht
)
557 noexcept(__node_alloc_traits::_S_nothrow_move()
558 && is_nothrow_move_assignable
<_Hash
>::value
559 && is_nothrow_move_assignable
<_Equal
>::value
)
561 constexpr bool __move_storage
=
562 __node_alloc_traits::_S_propagate_on_move_assign()
563 || __node_alloc_traits::_S_always_equal();
564 _M_move_assign(std::move(__ht
), __bool_constant
<__move_storage
>());
569 operator=(initializer_list
<value_type
> __l
)
571 __reuse_or_alloc_node_gen_t
__roan(_M_begin(), *this);
572 _M_before_begin
._M_nxt
= nullptr;
575 // We consider that all elements of __l are going to be inserted.
576 auto __l_bkt_count
= _M_rehash_policy
._M_bkt_for_elements(__l
.size());
578 // Do not shrink to keep potential user reservation.
579 if (_M_bucket_count
< __l_bkt_count
)
580 rehash(__l_bkt_count
);
582 this->_M_insert_range(__l
.begin(), __l
.end(), __roan
, __unique_keys
{});
586 ~_Hashtable() noexcept
;
590 noexcept(__and_
<__is_nothrow_swappable
<_Hash
>,
591 __is_nothrow_swappable
<_Equal
>>::value
);
593 // Basic container operations
596 { return iterator(_M_begin()); }
599 begin() const noexcept
600 { return const_iterator(_M_begin()); }
604 { return iterator(nullptr); }
608 { return const_iterator(nullptr); }
611 cbegin() const noexcept
612 { return const_iterator(_M_begin()); }
615 cend() const noexcept
616 { return const_iterator(nullptr); }
619 size() const noexcept
620 { return _M_element_count
; }
622 _GLIBCXX_NODISCARD
bool
623 empty() const noexcept
624 { return size() == 0; }
627 get_allocator() const noexcept
628 { return allocator_type(this->_M_node_allocator()); }
631 max_size() const noexcept
632 { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
637 { return this->_M_eq(); }
639 // hash_function, if present, comes from _Hash_code_base.
643 bucket_count() const noexcept
644 { return _M_bucket_count
; }
647 max_bucket_count() const noexcept
648 { return max_size(); }
651 bucket_size(size_type __bkt
) const
652 { return std::distance(begin(__bkt
), end(__bkt
)); }
655 bucket(const key_type
& __k
) const
656 { return _M_bucket_index(this->_M_hash_code(__k
)); }
659 begin(size_type __bkt
)
661 return local_iterator(*this, _M_bucket_begin(__bkt
),
662 __bkt
, _M_bucket_count
);
667 { return local_iterator(*this, nullptr, __bkt
, _M_bucket_count
); }
670 begin(size_type __bkt
) const
672 return const_local_iterator(*this, _M_bucket_begin(__bkt
),
673 __bkt
, _M_bucket_count
);
677 end(size_type __bkt
) const
678 { return const_local_iterator(*this, nullptr, __bkt
, _M_bucket_count
); }
682 cbegin(size_type __bkt
) const
684 return const_local_iterator(*this, _M_bucket_begin(__bkt
),
685 __bkt
, _M_bucket_count
);
689 cend(size_type __bkt
) const
690 { return const_local_iterator(*this, nullptr, __bkt
, _M_bucket_count
); }
693 load_factor() const noexcept
695 return static_cast<float>(size()) / static_cast<float>(bucket_count());
698 // max_load_factor, if present, comes from _Rehash_base.
700 // Generalization of max_load_factor. Extension, not found in
701 // TR1. Only useful if _RehashPolicy is something other than
704 __rehash_policy() const
705 { return _M_rehash_policy
; }
708 __rehash_policy(const _RehashPolicy
& __pol
)
709 { _M_rehash_policy
= __pol
; }
713 find(const key_type
& __k
);
716 find(const key_type
& __k
) const;
719 count(const key_type
& __k
) const;
721 std::pair
<iterator
, iterator
>
722 equal_range(const key_type
& __k
);
724 std::pair
<const_iterator
, const_iterator
>
725 equal_range(const key_type
& __k
) const;
728 // Bucket index computation helpers.
730 _M_bucket_index(const __node_value_type
& __n
) const noexcept
731 { return __hash_code_base::_M_bucket_index(__n
, _M_bucket_count
); }
734 _M_bucket_index(__hash_code __c
) const
735 { return __hash_code_base::_M_bucket_index(__c
, _M_bucket_count
); }
737 // Find and insert helper functions and types
738 // Find the node before the one matching the criteria.
740 _M_find_before_node(size_type
, const key_type
&, __hash_code
) const;
743 _M_find_node(size_type __bkt
, const key_type
& __key
,
744 __hash_code __c
) const
746 __node_base_ptr __before_n
= _M_find_before_node(__bkt
, __key
, __c
);
748 return static_cast<__node_ptr
>(__before_n
->_M_nxt
);
752 // Insert a node at the beginning of a bucket.
754 _M_insert_bucket_begin(size_type
, __node_ptr
);
756 // Remove the bucket first node
758 _M_remove_bucket_begin(size_type __bkt
, __node_ptr __next_n
,
759 size_type __next_bkt
);
761 // Get the node before __n in the bucket __bkt
763 _M_get_previous_node(size_type __bkt
, __node_ptr __n
);
765 // Insert node __n with hash code __code, in bucket __bkt if no
766 // rehash (assumes no element with same key already present).
767 // Takes ownership of __n if insertion succeeds, throws otherwise.
769 _M_insert_unique_node(size_type __bkt
, __hash_code
,
770 __node_ptr __n
, size_type __n_elt
= 1);
772 // Insert node __n with key __k and hash code __code.
773 // Takes ownership of __n if insertion succeeds, throws otherwise.
775 _M_insert_multi_node(__node_ptr __hint
,
776 __hash_code __code
, __node_ptr __n
);
778 template<typename
... _Args
>
779 std::pair
<iterator
, bool>
780 _M_emplace(true_type __uks
, _Args
&&... __args
);
782 template<typename
... _Args
>
784 _M_emplace(false_type __uks
, _Args
&&... __args
)
785 { return _M_emplace(cend(), __uks
, std::forward
<_Args
>(__args
)...); }
787 // Emplace with hint, useless when keys are unique.
788 template<typename
... _Args
>
790 _M_emplace(const_iterator
, true_type __uks
, _Args
&&... __args
)
791 { return _M_emplace(__uks
, std::forward
<_Args
>(__args
)...).first
; }
793 template<typename
... _Args
>
795 _M_emplace(const_iterator
, false_type __uks
, _Args
&&... __args
);
797 template<typename _Arg
, typename _NodeGenerator
>
798 std::pair
<iterator
, bool>
799 _M_insert(_Arg
&&, const _NodeGenerator
&, true_type __uks
);
801 template<typename _Arg
, typename _NodeGenerator
>
803 _M_insert(_Arg
&& __arg
, const _NodeGenerator
& __node_gen
,
806 return _M_insert(cend(), std::forward
<_Arg
>(__arg
), __node_gen
,
810 // Insert with hint, not used when keys are unique.
811 template<typename _Arg
, typename _NodeGenerator
>
813 _M_insert(const_iterator
, _Arg
&& __arg
,
814 const _NodeGenerator
& __node_gen
, true_type __uks
)
817 _M_insert(std::forward
<_Arg
>(__arg
), __node_gen
, __uks
).first
;
820 // Insert with hint when keys are not unique.
821 template<typename _Arg
, typename _NodeGenerator
>
823 _M_insert(const_iterator
, _Arg
&&,
824 const _NodeGenerator
&, false_type __uks
);
827 _M_erase(true_type __uks
, const key_type
&);
830 _M_erase(false_type __uks
, const key_type
&);
833 _M_erase(size_type __bkt
, __node_base_ptr __prev_n
, __node_ptr __n
);
837 template<typename
... _Args
>
839 emplace(_Args
&&... __args
)
840 { return _M_emplace(__unique_keys
{}, std::forward
<_Args
>(__args
)...); }
842 template<typename
... _Args
>
844 emplace_hint(const_iterator __hint
, _Args
&&... __args
)
846 return _M_emplace(__hint
, __unique_keys
{},
847 std::forward
<_Args
>(__args
)...);
850 // Insert member functions via inheritance.
854 erase(const_iterator
);
859 { return erase(const_iterator(__it
)); }
862 erase(const key_type
& __k
)
863 { return _M_erase(__unique_keys
{}, __k
); }
866 erase(const_iterator
, const_iterator
);
871 // Set number of buckets keeping it appropriate for container's number
873 void rehash(size_type __bkt_count
);
876 // reserve, if present, comes from _Rehash_base.
878 #if __cplusplus > 201402L
879 /// Re-insert an extracted node into a container with unique keys.
881 _M_reinsert_node(node_type
&& __nh
)
883 insert_return_type __ret
;
885 __ret
.position
= end();
888 __glibcxx_assert(get_allocator() == __nh
.get_allocator());
890 const key_type
& __k
= __nh
._M_key();
891 __hash_code __code
= this->_M_hash_code(__k
);
892 size_type __bkt
= _M_bucket_index(__code
);
893 if (__node_ptr __n
= _M_find_node(__bkt
, __k
, __code
))
895 __ret
.node
= std::move(__nh
);
896 __ret
.position
= iterator(__n
);
897 __ret
.inserted
= false;
902 = _M_insert_unique_node(__bkt
, __code
, __nh
._M_ptr
);
903 __nh
._M_ptr
= nullptr;
904 __ret
.inserted
= true;
910 /// Re-insert an extracted node into a container with equivalent keys.
912 _M_reinsert_node_multi(const_iterator __hint
, node_type
&& __nh
)
917 __glibcxx_assert(get_allocator() == __nh
.get_allocator());
919 const key_type
& __k
= __nh
._M_key();
920 auto __code
= this->_M_hash_code(__k
);
922 = _M_insert_multi_node(__hint
._M_cur
, __code
, __nh
._M_ptr
);
923 __nh
._M_ptr
= nullptr;
929 _M_extract_node(size_t __bkt
, __node_base_ptr __prev_n
)
931 __node_ptr __n
= static_cast<__node_ptr
>(__prev_n
->_M_nxt
);
932 if (__prev_n
== _M_buckets
[__bkt
])
933 _M_remove_bucket_begin(__bkt
, __n
->_M_next(),
934 __n
->_M_nxt
? _M_bucket_index(*__n
->_M_next()) : 0);
935 else if (__n
->_M_nxt
)
937 size_type __next_bkt
= _M_bucket_index(*__n
->_M_next());
938 if (__next_bkt
!= __bkt
)
939 _M_buckets
[__next_bkt
] = __prev_n
;
942 __prev_n
->_M_nxt
= __n
->_M_nxt
;
943 __n
->_M_nxt
= nullptr;
945 return { __n
, this->_M_node_allocator() };
951 extract(const_iterator __pos
)
953 size_t __bkt
= _M_bucket_index(*__pos
._M_cur
);
954 return _M_extract_node(__bkt
,
955 _M_get_previous_node(__bkt
, __pos
._M_cur
));
960 extract(const _Key
& __k
)
963 __hash_code __code
= this->_M_hash_code(__k
);
964 std::size_t __bkt
= _M_bucket_index(__code
);
965 if (__node_base_ptr __prev_node
= _M_find_before_node(__bkt
, __k
, __code
))
966 __nh
= _M_extract_node(__bkt
, __prev_node
);
970 /// Merge from a compatible container into one with unique keys.
971 template<typename _Compatible_Hashtable
>
973 _M_merge_unique(_Compatible_Hashtable
& __src
) noexcept
975 static_assert(is_same_v
<typename
_Compatible_Hashtable::node_type
,
976 node_type
>, "Node types are compatible");
977 __glibcxx_assert(get_allocator() == __src
.get_allocator());
979 auto __n_elt
= __src
.size();
980 for (auto __i
= __src
.begin(), __end
= __src
.end(); __i
!= __end
;)
983 const key_type
& __k
= _ExtractKey
{}(*__pos
);
984 __hash_code __code
= this->_M_hash_code(__k
);
985 size_type __bkt
= _M_bucket_index(__code
);
986 if (_M_find_node(__bkt
, __k
, __code
) == nullptr)
988 auto __nh
= __src
.extract(__pos
);
989 _M_insert_unique_node(__bkt
, __code
, __nh
._M_ptr
, __n_elt
);
990 __nh
._M_ptr
= nullptr;
993 else if (__n_elt
!= 1)
998 /// Merge from a compatible container into one with equivalent keys.
999 template<typename _Compatible_Hashtable
>
1001 _M_merge_multi(_Compatible_Hashtable
& __src
) noexcept
1003 static_assert(is_same_v
<typename
_Compatible_Hashtable::node_type
,
1004 node_type
>, "Node types are compatible");
1005 __glibcxx_assert(get_allocator() == __src
.get_allocator());
1007 this->reserve(size() + __src
.size());
1008 for (auto __i
= __src
.begin(), __end
= __src
.end(); __i
!= __end
;)
1009 _M_reinsert_node_multi(cend(), __src
.extract(__i
++));
1014 // Helper rehash method used when keys are unique.
1015 void _M_rehash_aux(size_type __bkt_count
, true_type __uks
);
1017 // Helper rehash method used when keys can be non-unique.
1018 void _M_rehash_aux(size_type __bkt_count
, false_type __uks
);
1020 // Unconditionally change size of bucket array to n, restore
1021 // hash policy state to __state on exception.
1022 void _M_rehash(size_type __bkt_count
, const __rehash_state
& __state
);
1026 // Definitions of class template _Hashtable's out-of-line member functions.
1027 template<typename _Key
, typename _Value
, typename _Alloc
,
1028 typename _ExtractKey
, typename _Equal
,
1029 typename _Hash
, typename _RangeHash
, typename _Unused
,
1030 typename _RehashPolicy
, typename _Traits
>
1032 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
1033 _Hash
, _RangeHash
, _Unused
, _RehashPolicy
, _Traits
>::
1034 _M_bucket_begin(size_type __bkt
) const
1037 __node_base_ptr __n
= _M_buckets
[__bkt
];
1038 return __n
? static_cast<__node_ptr
>(__n
->_M_nxt
) : nullptr;
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(size_type __bkt_count_hint,
	       const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
    : _Hashtable(__h, __eq, __a)  // delegate for the single-bucket defaults
    {
      // Round the hint up to the policy's next valid bucket count and only
      // allocate if that exceeds the (single) bucket the delegated ctor set.
      auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
      if (__bkt_count > _M_bucket_count)
	{
	  _M_buckets = _M_allocate_buckets(__bkt_count);
	  _M_bucket_count = __bkt_count;
	}
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, true_type /* __uks */)
      : _Hashtable(__bkt_count_hint, __h, __eq, __a)
      {
	// Unique-keys range construction: with unique keys the final size
	// is unknown (duplicates in the range collapse), so just insert
	// one by one and let the rehash policy grow the table as needed.
	for (; __f != __l; ++__f)
	  this->insert(*__f);
      }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, false_type /* __uks */)
      : _Hashtable(__h, __eq, __a)
      {
	// Non-unique keys: every element of the range will be stored, so
	// the element count is known up front; size the bucket array once
	// (honoring the caller's hint) to avoid rehashing during insertion.
	auto __nb_elems = __detail::__distance_fw(__f, __l);
	auto __bkt_count =
	  _M_rehash_policy._M_next_bkt(
	    std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
		     __bkt_count_hint));

	if (__bkt_count > _M_bucket_count)
	  {
	    _M_buckets = _M_allocate_buckets(__bkt_count);
	    _M_bucket_count = __bkt_count;
	  }

	for (; __f != __l; ++__f)
	  this->insert(*__f);
      }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    operator=(const _Hashtable& __ht)
    -> _Hashtable&
    {
      if (&__ht == this)
	return *this;

      if (__node_alloc_traits::_S_propagate_on_copy_assign())
	{
	  auto& __this_alloc = this->_M_node_allocator();
	  auto& __that_alloc = __ht._M_node_allocator();
	  if (!__node_alloc_traits::_S_always_equal()
	      && __this_alloc != __that_alloc)
	    {
	      // Replacement allocator cannot free existing storage.
	      // Release everything with the old allocator first, then
	      // propagate and rebuild from __ht with the new one.
	      this->_M_deallocate_nodes(_M_begin());
	      _M_before_begin._M_nxt = nullptr;
	      _M_deallocate_buckets();
	      _M_buckets = nullptr;
	      std::__alloc_on_copy(__this_alloc, __that_alloc);
	      __hashtable_base::operator=(__ht);
	      _M_bucket_count = __ht._M_bucket_count;
	      _M_element_count = __ht._M_element_count;
	      _M_rehash_policy = __ht._M_rehash_policy;
	      __alloc_node_gen_t __alloc_node_gen(*this);
	      __try
		{
		  _M_assign(__ht, __alloc_node_gen);
		}
	      __catch(...)
		{
		  // _M_assign took care of deallocating all memory. Now we
		  // must make sure this instance remains in a usable state.
		  _M_reset();
		  __throw_exception_again;
		}
	      return *this;
	    }
	  std::__alloc_on_copy(__this_alloc, __that_alloc);
	}

      // Reuse allocated buckets and nodes.
      _M_assign_elements(__ht);
      return *this;
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign_elements(_Ht&& __ht)
      {
	// Copy/move-assign elements from __ht, reusing this table's
	// existing nodes where possible (via __reuse_or_alloc_node_gen_t).
	// On exception the previous bucket array and rehash-policy state
	// are restored so *this stays usable.
	__buckets_ptr __former_buckets = nullptr;
	std::size_t __former_bucket_count = _M_bucket_count;
	const __rehash_state& __former_state = _M_rehash_policy._M_state();

	if (_M_bucket_count != __ht._M_bucket_count)
	  {
	    // Need a different-sized bucket array; keep the old one around
	    // until the assignment has succeeded.
	    __former_buckets = _M_buckets;
	    _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
	    _M_bucket_count = __ht._M_bucket_count;
	  }
	else
	  __builtin_memset(_M_buckets, 0,
			   _M_bucket_count * sizeof(__node_base_ptr));

	__try
	  {
	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
	    _M_element_count = __ht._M_element_count;
	    _M_rehash_policy = __ht._M_rehash_policy;
	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
	    _M_before_begin._M_nxt = nullptr;
	    _M_assign(std::forward<_Ht>(__ht), __roan);
	    if (__former_buckets)
	      _M_deallocate_buckets(__former_buckets, __former_bucket_count);
	  }
	__catch(...)
	  {
	    if (__former_buckets)
	      {
		// Restore previous buckets.
		_M_deallocate_buckets();
		_M_rehash_policy._M_reset(__former_state);
		_M_buckets = __former_buckets;
		_M_bucket_count = __former_bucket_count;
	      }
	    __builtin_memset(_M_buckets, 0,
			     _M_bucket_count * sizeof(__node_base_ptr));
	    __throw_exception_again;
	  }
      }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht, typename _NodeGenerator>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
      {
	// Rebuild this table's singly-linked node list as a copy (or move,
	// depending on _Ht's value category) of __ht's list, preserving
	// __ht's element order so bucket fix-ups stay cheap.
	__buckets_ptr __buckets = nullptr;
	if (!_M_buckets)
	  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);

	__try
	  {
	    if (!__ht._M_before_begin._M_nxt)
	      return;

	    // First deal with the special first node pointed to by
	    // _M_before_begin.
	    __node_ptr __ht_n = __ht._M_begin();
	    __node_ptr __this_n
	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
	    this->_M_copy_code(*__this_n, *__ht_n);
	    _M_update_bbegin(__this_n);

	    // Then deal with other nodes.
	    __node_ptr __prev_n = __this_n;
	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
	      {
		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
		__prev_n->_M_nxt = __this_n;
		this->_M_copy_code(*__this_n, *__ht_n);
		// A bucket entry points at the node *before* the bucket's
		// first node, hence __prev_n is stored, not __this_n.
		size_type __bkt = _M_bucket_index(*__this_n);
		if (!_M_buckets[__bkt])
		  _M_buckets[__bkt] = __prev_n;
		__prev_n = __this_n;
	      }
	  }
	__catch(...)
	  {
	    clear();
	    if (__buckets)
	      _M_deallocate_buckets();
	    __throw_exception_again;
	  }
      }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_reset() noexcept
    {
      // Return the table to its freshly-constructed empty state: one
      // (embedded) bucket, no elements.  Does not free nodes or buckets —
      // callers are responsible for that before resetting.
      _M_rehash_policy._M_reset();
      _M_bucket_count = 1;
      _M_single_bucket = nullptr;
      _M_buckets = &_M_single_bucket;
      _M_before_begin._M_nxt = nullptr;
      _M_element_count = 0;
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, true_type)
    {
      // Move assignment when memory can be stolen wholesale (allocators
      // compare equal or propagate on move): adopt __ht's buckets and
      // node list directly, then leave __ht empty.
      if (__builtin_expect(std::__addressof(__ht) == this, false))
	return;

      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
      __hashtable_base::operator=(std::move(__ht));
      _M_rehash_policy = __ht._M_rehash_policy;
      if (!__ht._M_uses_single_bucket())
	_M_buckets = __ht._M_buckets;
      else
	{
	  // __ht's bucket array is its embedded single bucket, whose
	  // address cannot be transferred; use our own embedded bucket.
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      _M_bucket_count = __ht._M_bucket_count;
      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
      _M_element_count = __ht._M_element_count;
      std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();
      __ht._M_reset();
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, false_type)
    {
      // Move assignment when the allocator does not propagate: memory can
      // only be stolen if the two allocators happen to compare equal at
      // run time; otherwise fall back to element-wise move.
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	_M_move_assign(std::move(__ht), true_type{});
      else
	{
	  // Can't move memory, move elements then.
	  _M_assign_elements(std::move(__ht));
	  __ht.clear();
	}
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      // Allocator obtained per the allocator-aware container requirements.
      __hashtable_alloc(
	__node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // _M_assign allocates the bucket array (since _M_buckets is null)
      // and deep-copies every node from __ht.
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       true_type /* alloc always equal */)
    noexcept(std::is_nothrow_copy_constructible<_Hash>::value &&
	     std::is_nothrow_copy_constructible<_Equal>::value)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      _M_buckets(__ht._M_buckets),
      _M_bucket_count(__ht._M_bucket_count),
      _M_before_begin(__ht._M_before_begin._M_nxt),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Allocators always compare equal here, so __ht's buckets and nodes
      // are adopted directly — except the embedded single bucket, whose
      // address is per-instance and must be re-pointed.
      // Update buckets if __ht is using its single bucket.
      if (__ht._M_uses_single_bucket())
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();

      __ht._M_reset();
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      // Extended copy constructor: copy elements but use the supplied
      // allocator instead of select_on_container_copy_construction.
      __hashtable_alloc(__node_alloc_type(__a)),
      _M_buckets(),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Deep-copy all nodes from __ht with the new allocator.
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       false_type /* alloc always equal */)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Allocators might differ: only steal __ht's memory if they compare
      // equal at run time; otherwise move (or copy, if moving could throw)
      // each element into newly allocated nodes.
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	{
	  if (__ht._M_uses_single_bucket())
	    {
	      _M_buckets = &_M_single_bucket;
	      _M_single_bucket = __ht._M_single_bucket;
	    }
	  else
	    _M_buckets = __ht._M_buckets;

	  // Fix bucket containing the _M_before_begin pointer that can't be
	  // moved.
	  _M_update_bbegin(__ht._M_begin());

	  __ht._M_reset();
	}
      else
	{
	  __alloc_node_gen_t __alloc_gen(*this);

	  // Fall back to copying elements when the value type's move
	  // constructor may throw, to keep the strong guarantee.
	  using _Fwd_Ht = typename
	    conditional<__move_if_noexcept_cond<value_type>::value,
			const _Hashtable&, _Hashtable&&>::type;
	  _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
	  __ht.clear();
	}
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    ~_Hashtable() noexcept
    {
      // Destroy and deallocate all nodes, then release the bucket array
      // (a no-op when the embedded single bucket is in use).
      clear();
      _M_deallocate_buckets();
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    swap(_Hashtable& __x)
    noexcept(__and_<__is_nothrow_swappable<_Hash>,
		    __is_nothrow_swappable<_Equal>>::value)
    {
      // The only base class with member variables is hash_code_base.
      // We define _Hash_code_base::_M_swap because different
      // specializations have different members.
      this->_M_swap(__x);

      std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
      std::swap(_M_rehash_policy, __x._M_rehash_policy);

      // Deal properly with potentially moved instances.
      // The embedded single bucket lives inside each object, so its
      // address can never be exchanged — only its contents.
      if (this->_M_uses_single_bucket())
	{
	  if (!__x._M_uses_single_bucket())
	    {
	      _M_buckets = __x._M_buckets;
	      __x._M_buckets = &__x._M_single_bucket;
	    }
	}
      else if (__x._M_uses_single_bucket())
	{
	  __x._M_buckets = _M_buckets;
	  _M_buckets = &_M_single_bucket;
	}
      else
	std::swap(_M_buckets, __x._M_buckets);

      std::swap(_M_bucket_count, __x._M_bucket_count);
      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
      std::swap(_M_element_count, __x._M_element_count);
      std::swap(_M_single_bucket, __x._M_single_bucket);

      // Fix buckets containing the _M_before_begin pointers that can't be
      // swapped.
      _M_update_bbegin();
      __x._M_update_bbegin();
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    find(const key_type& __k)
    -> iterator
    {
      // Hash once, map to a bucket, then scan that bucket's chain.
      // _M_find_node returns null when no equivalent key exists, which
      // yields the end() iterator.
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);
      return iterator(_M_find_node(__bkt, __k, __code));
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    find(const key_type& __k) const
    -> const_iterator
    {
      // Const overload: identical lookup to the non-const find.
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);
      return const_iterator(_M_find_node(__bkt, __k, __code));
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    count(const key_type& __k) const
    -> size_type
    {
      auto __it = find(__k);
      if (!__it._M_cur)
	return 0;

      // With unique keys the answer can only be 0 or 1.
      if (__unique_keys::value)
	return 1;

      // All equivalent values are next to each other, if we find a
      // non-equivalent value after an equivalent one it means that we won't
      // find any new equivalent value.
      size_type __result = 1;
      for (auto __ref = __it++;
	   __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
	   ++__it)
	++__result;

      return __result;
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k)
    -> pair<iterator, iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
	return { __ite, __ite };  // empty range at end()

      auto __beg = __ite++;
      if (__unique_keys::value)
	return { __beg, __ite };  // at most one match

      // All equivalent values are next to each other, if we find a
      // non-equivalent value after an equivalent one it means that we won't
      // find any new equivalent value.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
	++__ite;

      return { __beg, __ite };
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k) const
    -> pair<const_iterator, const_iterator>
    {
      // Const overload: mirrors the non-const equal_range.
      auto __ite = find(__k);
      if (!__ite._M_cur)
	return { __ite, __ite };

      auto __beg = __ite++;
      if (__unique_keys::value)
	return { __beg, __ite };

      // All equivalent values are next to each other, if we find a
      // non-equivalent value after an equivalent one it means that we won't
      // find any new equivalent value.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
	++__ite;

      return { __beg, __ite };
    }
  // Find the node before the one whose key compares equal to k in the bucket
  // bkt. Return nullptr if no node is found.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_find_before_node(size_type __bkt, const key_type& __k,
			__hash_code __code) const
    -> __node_base_ptr
    {
      // Returning the *predecessor* lets callers unlink a match in O(1);
      // this is why buckets store before-begin pointers.
      __node_base_ptr __prev_p = _M_buckets[__bkt];
      if (!__prev_p)
	return nullptr;

      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
	   __p = __p->_M_next())
	{
	  if (this->_M_equals(__k, __code, *__p))
	    return __prev_p;

	  // Stop at the end of the list or when the chain leaves this
	  // bucket (nodes of one bucket are contiguous in the list).
	  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
	    break;
	  __prev_p = __p;
	}

      return nullptr;
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
    {
      // Link __node at the front of bucket __bkt, maintaining the
      // invariant that _M_buckets[b] points to the node *before* bucket
      // b's first node.
      if (_M_buckets[__bkt])
	{
	  // Bucket is not empty, we just need to insert the new node
	  // after the bucket before begin.
	  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
	  _M_buckets[__bkt]->_M_nxt = __node;
	}
      else
	{
	  // The bucket is empty, the new node is inserted at the
	  // beginning of the singly-linked list and the bucket will
	  // contain _M_before_begin pointer.
	  __node->_M_nxt = _M_before_begin._M_nxt;
	  _M_before_begin._M_nxt = __node;

	  if (__node->_M_nxt)
	    // We must update former begin bucket that is pointing to
	    // _M_before_begin.
	    _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;

	  _M_buckets[__bkt] = &_M_before_begin;
	}
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
			   size_type __next_bkt)
    {
      // Called after unlinking bucket __bkt's first node; __next is the
      // node that followed it (may be null), __next_bkt its bucket.
      if (!__next || __next_bkt != __bkt)
	{
	  // Bucket is now empty
	  // First update next bucket if any
	  if (__next)
	    _M_buckets[__next_bkt] = _M_buckets[__bkt];

	  // Second update before begin node if necessary
	  if (&_M_before_begin == _M_buckets[__bkt])
	    _M_before_begin._M_nxt = __next;
	  _M_buckets[__bkt] = nullptr;
	}
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_get_previous_node(size_type __bkt, __node_ptr __n)
    -> __node_base_ptr
    {
      // Walk bucket __bkt's chain until the node linking to __n is found.
      // Precondition (implied by the loop): __n is in bucket __bkt, so the
      // walk terminates within the bucket.
      __node_base_ptr __prev_n = _M_buckets[__bkt];
      while (__prev_n->_M_nxt != __n)
	__prev_n = __prev_n->_M_nxt;
      return __prev_n;
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace(true_type /* __uks */, _Args&&... __args)
      -> pair<iterator, bool>
      {
	// First build the node to get access to the hash code
	// (_Scoped_node owns the node and frees it if we bail out or throw).
	_Scoped_node __node { this, std::forward<_Args>(__args)... };
	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
	__hash_code __code = this->_M_hash_code(__k);
	size_type __bkt = _M_bucket_index(__code);
	if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
	  // There is already an equivalent node, no insertion
	  return std::make_pair(iterator(__p), false);

	// Insert the node; ownership passes to the table on success.
	auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
	__node._M_node = nullptr;
	return { __pos, true };
      }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace(const_iterator __hint, false_type /* __uks */,
		 _Args&&... __args)
      -> iterator
      {
	// First build the node to get its hash code.
	// (_Scoped_node owns the node until the insertion succeeds.)
	_Scoped_node __node { this, std::forward<_Args>(__args)... };
	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());

	__hash_code __code = this->_M_hash_code(__k);
	auto __pos
	  = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
	__node._M_node = nullptr;  // table now owns the node
	return __pos;
      }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_unique_node(size_type __bkt, __hash_code __code,
			  __node_ptr __node, size_type __n_elt)
    -> iterator
    {
      // Link an already-allocated node (known not to duplicate any key)
      // into bucket __bkt, rehashing first if the policy requires it.
      // __n_elt lets bulk insertions amortize the rehash decision.
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
					  __n_elt);

      if (__do_rehash.first)
	{
	  _M_rehash(__do_rehash.second, __saved_state);
	  __bkt = _M_bucket_index(__code);  // bucket changed with the rehash
	}

      this->_M_store_code(*__node, __code);

      // Always insert at the beginning of the bucket.
      _M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_multi_node(__node_ptr __hint,
			 __hash_code __code, __node_ptr __node)
    -> iterator
    {
      // Link an already-allocated node into the table, keeping equivalent
      // keys adjacent in the singly-linked list.
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);

      if (__do_rehash.first)
	_M_rehash(__do_rehash.second, __saved_state);

      this->_M_store_code(*__node, __code);
      const key_type& __k = _ExtractKey{}(__node->_M_v());
      size_type __bkt = _M_bucket_index(__code);

      // Find the node before an equivalent one or use hint if it exists and
      // if it is equivalent.
      __node_base_ptr __prev
	= __builtin_expect(__hint != nullptr, false)
	  && this->_M_equals(__k, __code, *__hint)
	? __hint
	: _M_find_before_node(__bkt, __k, __code);

      if (__prev)
	{
	  // Insert after the node before the equivalent one.
	  __node->_M_nxt = __prev->_M_nxt;
	  __prev->_M_nxt = __node;
	  if (__builtin_expect(__prev == __hint, false))
	    // hint might be the last bucket node, in this case we need to
	    // update next bucket.
	    if (__node->_M_nxt
		&& !this->_M_equals(__k, __code, *__node->_M_next()))
	      {
		size_type __next_bkt = _M_bucket_index(*__node->_M_next());
		if (__next_bkt != __bkt)
		  _M_buckets[__next_bkt] = __node;
	      }
	}
      else
	// The inserted node has no equivalent in the hashtable. We must
	// insert the new node at the beginning of the bucket to preserve
	// equivalent elements' relative positions.
	_M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }
  // Insert v if no element with its key is already present.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_insert(_Arg&& __v, const _NodeGenerator& __node_gen,
		true_type /* __uks */)
      -> pair<iterator, bool>
      {
	// Look up first: the node is only allocated (via __node_gen) when
	// the key is absent, so a duplicate insert allocates nothing.
	const key_type& __k = _ExtractKey{}(__v);
	__hash_code __code = this->_M_hash_code(__k);
	size_type __bkt = _M_bucket_index(__code);

	if (__node_ptr __node = _M_find_node(__bkt, __k, __code))
	  return { iterator(__node), false };

	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
	auto __pos
	  = _M_insert_unique_node(__bkt, __code, __node._M_node);
	__node._M_node = nullptr;  // table now owns the node
	return { __pos, true };
      }
  // Insert v unconditionally.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_insert(const_iterator __hint, _Arg&& __v,
		const _NodeGenerator& __node_gen,
		false_type /* __uks */)
      -> iterator
      {
	// First compute the hash code so that we don't do anything if it
	// throws.
	__hash_code __code = this->_M_hash_code(_ExtractKey{}(__v));

	// Second allocate new node so that we don't rehash if it throws.
	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
	auto __pos
	  = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
	__node._M_node = nullptr;  // table now owns the node
	return __pos;
      }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __it)
    -> iterator
    {
      __node_ptr __n = __it._M_cur;
      std::size_t __bkt = _M_bucket_index(*__n);

      // Look for previous node to unlink it from the erased one, this
      // is why we need buckets to contain the before begin to make
      // this search fast.
      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      return _M_erase(__bkt, __prev_n, __n);
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> iterator
    {
      // Unlink and destroy node __n from bucket __bkt; __prev_n is its
      // predecessor in the list.  Returns an iterator to the next node.
      if (__prev_n == _M_buckets[__bkt])
	// __n is the bucket's first node: the bucket bookkeeping (and
	// possibly _M_before_begin) must be fixed up.
	_M_remove_bucket_begin(__bkt, __n->_M_next(),
			       __n->_M_nxt
			       ? _M_bucket_index(*__n->_M_next()) : 0);
      else if (__n->_M_nxt)
	{
	  // If the next node starts a different bucket, that bucket's
	  // before-begin pointer was __n and must become __prev_n.
	  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	  if (__next_bkt != __bkt)
	    _M_buckets[__next_bkt] = __prev_n;
	}

      __prev_n->_M_nxt = __n->_M_nxt;
      iterator __result(__n->_M_next());
      this->_M_deallocate_node(__n);
      --_M_element_count;

      return __result;
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(true_type /* __uks */, const key_type& __k)
    -> size_type
    {
      // Unique keys: erase at most one element, return 0 or 1.
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);

      // Look for the node before the first matching node.
      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
      if (!__prev_n)
	return 0;

      // We found a matching node, erase it.
      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
      _M_erase(__bkt, __prev_n, __n);
      return 1;
    }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(false_type /* __uks */, const key_type& __k)
    -> size_type
    {
      // Non-unique keys: erase the whole run of equivalent elements and
      // return how many were removed.
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);

      // Look for the node before the first matching node.
      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
      if (!__prev_n)
	return 0;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 526. Is it undefined if a function in the standard changes
      // in parameters?
      // We use one loop to find all matching nodes and another to deallocate
      // them so that the key stays valid during the first loop. It might be
      // invalidated indirectly when destroying nodes.
      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
      __node_ptr __n_last = __n->_M_next();
      while (__n_last && this->_M_node_equals(*__n, *__n_last))
	__n_last = __n_last->_M_next();

      std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;

      // Deallocate nodes.
      size_type __result = 0;
      do
	{
	  __node_ptr __p = __n->_M_next();
	  this->_M_deallocate_node(__n);
	  __n = __p;
	  ++__result;
	}
      while (__n != __n_last);

      _M_element_count -= __result;
      if (__prev_n == _M_buckets[__bkt])
	_M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
      else if (__n_last_bkt != __bkt)
	// The erased run ended the bucket; the following bucket's
	// before-begin pointer must now be __prev_n.
	_M_buckets[__n_last_bkt] = __prev_n;
      __prev_n->_M_nxt = __n_last;
      return __result;
    }
2032 template<typename _Key
, typename _Value
, typename _Alloc
,
2033 typename _ExtractKey
, typename _Equal
,
2034 typename _Hash
, typename _RangeHash
, typename _Unused
,
2035 typename _RehashPolicy
, typename _Traits
>
2037 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2038 _Hash
, _RangeHash
, _Unused
, _RehashPolicy
, _Traits
>::
2039 erase(const_iterator __first
, const_iterator __last
)
2042 __node_ptr __n
= __first
._M_cur
;
2043 __node_ptr __last_n
= __last
._M_cur
;
2044 if (__n
== __last_n
)
2045 return iterator(__n
);
2047 std::size_t __bkt
= _M_bucket_index(*__n
);
2049 __node_base_ptr __prev_n
= _M_get_previous_node(__bkt
, __n
);
2050 bool __is_bucket_begin
= __n
== _M_bucket_begin(__bkt
);
2051 std::size_t __n_bkt
= __bkt
;
2056 __node_ptr __tmp
= __n
;
2057 __n
= __n
->_M_next();
2058 this->_M_deallocate_node(__tmp
);
2062 __n_bkt
= _M_bucket_index(*__n
);
2064 while (__n
!= __last_n
&& __n_bkt
== __bkt
);
2065 if (__is_bucket_begin
)
2066 _M_remove_bucket_begin(__bkt
, __n
, __n_bkt
);
2067 if (__n
== __last_n
)
2069 __is_bucket_begin
= true;
2073 if (__n
&& (__n_bkt
!= __bkt
|| __is_bucket_begin
))
2074 _M_buckets
[__n_bkt
] = __prev_n
;
2075 __prev_n
->_M_nxt
= __n
;
2076 return iterator(__n
);
2079 template<typename _Key
, typename _Value
, typename _Alloc
,
2080 typename _ExtractKey
, typename _Equal
,
2081 typename _Hash
, typename _RangeHash
, typename _Unused
,
2082 typename _RehashPolicy
, typename _Traits
>
2084 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2085 _Hash
, _RangeHash
, _Unused
, _RehashPolicy
, _Traits
>::
2088 this->_M_deallocate_nodes(_M_begin());
2089 __builtin_memset(_M_buckets
, 0,
2090 _M_bucket_count
* sizeof(__node_base_ptr
));
2091 _M_element_count
= 0;
2092 _M_before_begin
._M_nxt
= nullptr;
2095 template<typename _Key
, typename _Value
, typename _Alloc
,
2096 typename _ExtractKey
, typename _Equal
,
2097 typename _Hash
, typename _RangeHash
, typename _Unused
,
2098 typename _RehashPolicy
, typename _Traits
>
2100 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2101 _Hash
, _RangeHash
, _Unused
, _RehashPolicy
, _Traits
>::
2102 rehash(size_type __bkt_count
)
2104 const __rehash_state
& __saved_state
= _M_rehash_policy
._M_state();
2106 = std::max(_M_rehash_policy
._M_bkt_for_elements(_M_element_count
+ 1),
2108 __bkt_count
= _M_rehash_policy
._M_next_bkt(__bkt_count
);
2110 if (__bkt_count
!= _M_bucket_count
)
2111 _M_rehash(__bkt_count
, __saved_state
);
2113 // No rehash, restore previous state to keep it consistent with
2115 _M_rehash_policy
._M_reset(__saved_state
);
2118 template<typename _Key
, typename _Value
, typename _Alloc
,
2119 typename _ExtractKey
, typename _Equal
,
2120 typename _Hash
, typename _RangeHash
, typename _Unused
,
2121 typename _RehashPolicy
, typename _Traits
>
2123 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2124 _Hash
, _RangeHash
, _Unused
, _RehashPolicy
, _Traits
>::
2125 _M_rehash(size_type __bkt_count
, const __rehash_state
& __state
)
2129 _M_rehash_aux(__bkt_count
, __unique_keys
{});
2133 // A failure here means that buckets allocation failed. We only
2134 // have to restore hash policy previous state.
2135 _M_rehash_policy
._M_reset(__state
);
2136 __throw_exception_again
;
2140 // Rehash when there is no equivalent elements.
2141 template<typename _Key
, typename _Value
, typename _Alloc
,
2142 typename _ExtractKey
, typename _Equal
,
2143 typename _Hash
, typename _RangeHash
, typename _Unused
,
2144 typename _RehashPolicy
, typename _Traits
>
2146 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2147 _Hash
, _RangeHash
, _Unused
, _RehashPolicy
, _Traits
>::
2148 _M_rehash_aux(size_type __bkt_count
, true_type
/* __uks */)
2150 __buckets_ptr __new_buckets
= _M_allocate_buckets(__bkt_count
);
2151 __node_ptr __p
= _M_begin();
2152 _M_before_begin
._M_nxt
= nullptr;
2153 std::size_t __bbegin_bkt
= 0;
2156 __node_ptr __next
= __p
->_M_next();
2158 = __hash_code_base::_M_bucket_index(*__p
, __bkt_count
);
2159 if (!__new_buckets
[__bkt
])
2161 __p
->_M_nxt
= _M_before_begin
._M_nxt
;
2162 _M_before_begin
._M_nxt
= __p
;
2163 __new_buckets
[__bkt
] = &_M_before_begin
;
2165 __new_buckets
[__bbegin_bkt
] = __p
;
2166 __bbegin_bkt
= __bkt
;
2170 __p
->_M_nxt
= __new_buckets
[__bkt
]->_M_nxt
;
2171 __new_buckets
[__bkt
]->_M_nxt
= __p
;
2177 _M_deallocate_buckets();
2178 _M_bucket_count
= __bkt_count
;
2179 _M_buckets
= __new_buckets
;
2182 // Rehash when there can be equivalent elements, preserve their relative
2184 template<typename _Key
, typename _Value
, typename _Alloc
,
2185 typename _ExtractKey
, typename _Equal
,
2186 typename _Hash
, typename _RangeHash
, typename _Unused
,
2187 typename _RehashPolicy
, typename _Traits
>
2189 _Hashtable
<_Key
, _Value
, _Alloc
, _ExtractKey
, _Equal
,
2190 _Hash
, _RangeHash
, _Unused
, _RehashPolicy
, _Traits
>::
2191 _M_rehash_aux(size_type __bkt_count
, false_type
/* __uks */)
2193 __buckets_ptr __new_buckets
= _M_allocate_buckets(__bkt_count
);
2194 __node_ptr __p
= _M_begin();
2195 _M_before_begin
._M_nxt
= nullptr;
2196 std::size_t __bbegin_bkt
= 0;
2197 std::size_t __prev_bkt
= 0;
2198 __node_ptr __prev_p
= nullptr;
2199 bool __check_bucket
= false;
2203 __node_ptr __next
= __p
->_M_next();
2205 = __hash_code_base::_M_bucket_index(*__p
, __bkt_count
);
2207 if (__prev_p
&& __prev_bkt
== __bkt
)
2209 // Previous insert was already in this bucket, we insert after
2210 // the previously inserted one to preserve equivalent elements
2212 __p
->_M_nxt
= __prev_p
->_M_nxt
;
2213 __prev_p
->_M_nxt
= __p
;
2215 // Inserting after a node in a bucket require to check that we
2216 // haven't change the bucket last node, in this case next
2217 // bucket containing its before begin node must be updated. We
2218 // schedule a check as soon as we move out of the sequence of
2219 // equivalent nodes to limit the number of checks.
2220 __check_bucket
= true;
2226 // Check if we shall update the next bucket because of
2227 // insertions into __prev_bkt bucket.
2228 if (__prev_p
->_M_nxt
)
2230 std::size_t __next_bkt
2231 = __hash_code_base::_M_bucket_index(
2232 *__prev_p
->_M_next(), __bkt_count
);
2233 if (__next_bkt
!= __prev_bkt
)
2234 __new_buckets
[__next_bkt
] = __prev_p
;
2236 __check_bucket
= false;
2239 if (!__new_buckets
[__bkt
])
2241 __p
->_M_nxt
= _M_before_begin
._M_nxt
;
2242 _M_before_begin
._M_nxt
= __p
;
2243 __new_buckets
[__bkt
] = &_M_before_begin
;
2245 __new_buckets
[__bbegin_bkt
] = __p
;
2246 __bbegin_bkt
= __bkt
;
2250 __p
->_M_nxt
= __new_buckets
[__bkt
]->_M_nxt
;
2251 __new_buckets
[__bkt
]->_M_nxt
= __p
;
2259 if (__check_bucket
&& __prev_p
->_M_nxt
)
2261 std::size_t __next_bkt
2262 = __hash_code_base::_M_bucket_index(*__prev_p
->_M_next(),
2264 if (__next_bkt
!= __prev_bkt
)
2265 __new_buckets
[__next_bkt
] = __prev_p
;
2268 _M_deallocate_buckets();
2269 _M_bucket_count
= __bkt_count
;
2270 _M_buckets
= __new_buckets
;
2273 #if __cplusplus > 201402L
2274 template<typename
, typename
, typename
> class _Hash_merge_helper
{ };
2277 #if __cpp_deduction_guides >= 201606
2278 // Used to constrain deduction guides
2279 template<typename _Hash
>
2280 using _RequireNotAllocatorOrIntegral
2281 = __enable_if_t
<!__or_
<is_integral
<_Hash
>, __is_allocator
<_Hash
>>::value
>;
2284 _GLIBCXX_END_NAMESPACE_VERSION
2287 #endif // _HASHTABLE_H