libstdc++
bits/hashtable.h
1 // hashtable.h header -*- C++ -*-
2 
3 // Copyright (C) 2007-2021 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/hashtable.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28  */
29 
30 #ifndef _HASHTABLE_H
31 #define _HASHTABLE_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/hashtable_policy.h>
37 #if __cplusplus > 201402L
38 # include <bits/node_handle.h>
39 #endif
40 
41 namespace std _GLIBCXX_VISIBILITY(default)
42 {
43 _GLIBCXX_BEGIN_NAMESPACE_VERSION
44 /// @cond undocumented
45 
46  template<typename _Tp, typename _Hash>
47  using __cache_default
48  = __not_<__and_<// Do not cache for fast hasher.
49  __is_fast_hash<_Hash>,
50  // Mandatory to have erase not throwing.
51  __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
52 
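The practical effect of __cache_default: the hash code is cached in each node unless the hasher is both cheap to call (__is_fast_hash) and guaranteed not to throw. A minimal standalone illustration of the nothrow half of that test, using only standard C++17 traits; SlowHash is a hypothetical user functor, not anything defined by libstdc++:

#include <cstddef>
#include <functional>
#include <string>
#include <type_traits>

// In libstdc++, std::hash<int> is noexcept (and a "fast" hash), so an
// unordered container of int with the default hasher does not cache codes.
static_assert(std::is_nothrow_invocable<const std::hash<int>&,
                                        const int&>::value,
              "the default integer hash never throws");

// A user hasher whose call operator is not marked noexcept: caching becomes
// mandatory so that erase() can stay non-throwing.
struct SlowHash
{
  std::size_t operator()(const std::string& s) const { return s.size(); }
};

static_assert(!std::is_nothrow_invocable<const SlowHash&,
                                         const std::string&>::value,
              "a potentially-throwing hasher forces the cache on");

(std::hash<std::string> itself is marked as a non-fast hash in libstdc++, so string keys get the cache even though the standard hasher is noexcept.)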
53  // Helper to conditionally delete the default constructor.
54  // The _Hash_node_base type is used to distinguish this specialization
55  // from any other potentially-overlapping subobjects of the hashtable.
56  template<typename _Equal, typename _Hash, typename _Allocator>
57  using _Hashtable_enable_default_ctor
58  = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
59  is_default_constructible<_Hash>,
60  is_default_constructible<_Allocator>>{},
61  __detail::_Hash_node_base>;
62 
63  /**
64  * Primary class template _Hashtable.
65  *
66  * @ingroup hashtable-detail
67  *
68  * @tparam _Value CopyConstructible type.
69  *
70  * @tparam _Key CopyConstructible type.
71  *
72  * @tparam _Alloc An allocator type
73  * ([lib.allocator.requirements]) whose _Alloc::value_type is
74  * _Value. As a conforming extension, we allow for
75  * _Alloc::value_type != _Value.
76  *
77  * @tparam _ExtractKey Function object that takes an object of type
78  * _Value and returns a value of type _Key.
79  *
 80  * @tparam _Equal Function object that takes two objects of type _Key
81  * and returns a bool-like value that is true if the two objects
82  * are considered equal.
83  *
84  * @tparam _Hash The hash function. A unary function object with
85  * argument type _Key and result type size_t. Return values should
 86  * be distributed over the entire range [0, numeric_limits<size_t>::max()].
87  *
88  * @tparam _RangeHash The range-hashing function (in the terminology of
89  * Tavori and Dreizin). A binary function object whose argument
90  * types and result type are all size_t. Given arguments r and N,
91  * the return value is in the range [0, N).
92  *
93  * @tparam _Unused Not used.
94  *
95  * @tparam _RehashPolicy Policy class with three members, all of
96  * which govern the bucket count. _M_next_bkt(n) returns a bucket
97  * count no smaller than n. _M_bkt_for_elements(n) returns a
98  * bucket count appropriate for an element count of n.
99  * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
100  * current bucket count is n_bkt and the current element count is
101  * n_elt, we need to increase the bucket count for n_ins insertions.
102  * If so, returns make_pair(true, n), where n is the new bucket count. If
103  * not, returns make_pair(false, <anything>)
104  *
105  * @tparam _Traits Compile-time class with three boolean
106  * std::integral_constant members: __cache_hash_code, __constant_iterators,
107  * __unique_keys.
108  *
109  * Each _Hashtable data structure has:
110  *
111  * - _Bucket[] _M_buckets
112  * - _Hash_node_base _M_before_begin
113  * - size_type _M_bucket_count
114  * - size_type _M_element_count
115  *
116  * with _Bucket being _Hash_node_base* and _Hash_node containing:
117  *
118  * - _Hash_node* _M_next
119  * - Tp _M_value
120  * - size_t _M_hash_code if cache_hash_code is true
121  *
122  * In terms of Standard containers the hashtable is like the aggregation of:
123  *
124  * - std::forward_list<_Node> containing the elements
125  * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
126  *
127  * The non-empty buckets contain the node before the first node in the
128  * bucket. This design makes it possible to implement something like a
129  * std::forward_list::insert_after on container insertion and
130  * std::forward_list::erase_after on container erase
131  * calls. _M_before_begin is equivalent to
132  * std::forward_list::before_begin. Empty buckets contain
133  * nullptr. Note that one of the non-empty buckets contains
 134  * &_M_before_begin, which is not a dereferenceable node, so the node
 135  * pointer stored in a bucket must never be dereferenced; only its next
 136  * node may be. (A standalone sketch of this layout follows the comment.)
137  *
138  * Walking through a bucket's nodes requires a check on the hash code to
139  * see if each node is still in the bucket. Such a design assumes a
140  * quite efficient hash functor and is one of the reasons it is
141  * highly advisable to set __cache_hash_code to true.
142  *
143  * The container iterators are simply built from nodes. This way
144  * incrementing the iterator is perfectly efficient independent of
145  * how many empty buckets there are in the container.
146  *
147  * On insert we compute the element's hash code and use it to find the
148  * bucket index. If the element must be inserted in an empty bucket
149  * we add it at the beginning of the singly linked list and make the
150  * bucket point to _M_before_begin. The bucket that used to point to
151  * _M_before_begin, if any, is updated to point to its new before
152  * begin node.
153  *
154  * On erase, the simple iterator design requires using the hash
155  * functor to get the index of the bucket to update. For this
156  * reason, when __cache_hash_code is set to false the hash functor must
157  * not throw and this is enforced by a static assertion.
158  *
159  * Functionality is implemented by decomposition into base classes,
160  * where the derived _Hashtable class is used in _Map_base,
161  * _Insert, _Rehash_base, and _Equality base classes to access the
162  * "this" pointer. _Hashtable_base is used in the base classes as a
163  * non-recursive, fully-completed-type so that detailed nested type
164  * information, such as iterator type and node type, can be
165  * used. This is similar to the "Curiously Recurring Template
166  * Pattern" (CRTP) technique, but uses a reconstructed, not
167  * explicitly passed, template pattern.
168  *
169  * Base class templates are:
170  * - __detail::_Hashtable_base
171  * - __detail::_Map_base
172  * - __detail::_Insert
173  * - __detail::_Rehash_base
174  * - __detail::_Equality
175  */
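
To make the bucket layout described above concrete, here is a small, self-contained model (an illustration only, not libstdc++ code; every name in it is invented). It mimics what _M_insert_bucket_begin does: all nodes live in one singly linked list, a non-empty bucket stores the node just before the bucket's first element, and before_begin plays the role of _M_before_begin:

#include <array>
#include <cstddef>
#include <iostream>

struct NodeBase { NodeBase* next = nullptr; };

struct Node : NodeBase
{
  int value;
  std::size_t hash;   // stands in for the cached hash code
  explicit Node(int v) : value(v), hash(static_cast<std::size_t>(v)) { }
};

struct MiniTable
{
  NodeBase before_begin;               // plays the role of _M_before_begin
  std::array<NodeBase*, 8> buckets{};  // plays the role of _M_buckets

  std::size_t
  bucket_of(const Node& n) const
  { return n.hash % buckets.size(); }

  void
  insert(Node* n)
  {
    const std::size_t bkt = bucket_of(*n);
    if (buckets[bkt])
      {
        // Non-empty bucket: splice right after its "node before first".
        n->next = buckets[bkt]->next;
        buckets[bkt]->next = n;
      }
    else
      {
        // Empty bucket: the new node becomes the head of the global list.
        n->next = before_begin.next;
        before_begin.next = n;
        if (n->next)
          // The old head's bucket pointed to &before_begin; its new
          // "node before first" is n.
          buckets[bucket_of(*static_cast<Node*>(n->next))] = n;
        buckets[bkt] = &before_begin;
      }
  }
};

int main()
{
  MiniTable t;
  Node a(1), b(9), c(2);   // 1 and 9 collide in bucket 1, 2 goes to bucket 2
  t.insert(&a);
  t.insert(&b);
  t.insert(&c);
  for (NodeBase* p = t.before_begin.next; p; p = p->next)
    std::cout << static_cast<Node*>(p)->value << ' ';   // prints "2 9 1"
  std::cout << '\n';
}

After the three insertions, bucket 2 points to &before_begin and bucket 1 points to the bucket-2 node: exactly the "node before the first node in the bucket" arrangement that the iterators and the erase_after-style operations rely on.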
176  template<typename _Key, typename _Value, typename _Alloc,
177  typename _ExtractKey, typename _Equal,
178  typename _Hash, typename _RangeHash, typename _Unused,
179  typename _RehashPolicy, typename _Traits>
180  class _Hashtable
181  : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
182  _Hash, _RangeHash, _Unused, _Traits>,
183  public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
184  _Hash, _RangeHash, _Unused,
185  _RehashPolicy, _Traits>,
186  public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
187  _Hash, _RangeHash, _Unused,
188  _RehashPolicy, _Traits>,
189  public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
190  _Hash, _RangeHash, _Unused,
191  _RehashPolicy, _Traits>,
192  public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
193  _Hash, _RangeHash, _Unused,
194  _RehashPolicy, _Traits>,
195  private __detail::_Hashtable_alloc<
196  __alloc_rebind<_Alloc,
197  __detail::_Hash_node<_Value,
198  _Traits::__hash_cached::value>>>,
199  private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
200  {
201  static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
202  "unordered container must have a non-const, non-volatile value_type");
203 #if __cplusplus > 201703L || defined __STRICT_ANSI__
204  static_assert(is_same<typename _Alloc::value_type, _Value>{},
205  "unordered container must have the same value_type as its allocator");
206 #endif
207 
208  using __traits_type = _Traits;
209  using __hash_cached = typename __traits_type::__hash_cached;
210  using __constant_iterators = typename __traits_type::__constant_iterators;
211  using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
212  using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
213 
214  using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
215 
216  using __node_value_type =
217  __detail::_Hash_node_value<_Value, __hash_cached::value>;
218  using __node_ptr = typename __hashtable_alloc::__node_ptr;
219  using __value_alloc_traits =
220  typename __hashtable_alloc::__value_alloc_traits;
221  using __node_alloc_traits =
222  typename __hashtable_alloc::__node_alloc_traits;
223  using __node_base = typename __hashtable_alloc::__node_base;
224  using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
225  using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
226 
227  using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
228  _Equal, _Hash,
229  _RangeHash, _Unused,
230  _RehashPolicy, _Traits>;
231  using __enable_default_ctor
232  = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
233 
234  public:
235  typedef _Key key_type;
236  typedef _Value value_type;
237  typedef _Alloc allocator_type;
238  typedef _Equal key_equal;
239 
240  // mapped_type, if present, comes from _Map_base.
241  // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
242  typedef typename __value_alloc_traits::pointer pointer;
243  typedef typename __value_alloc_traits::const_pointer const_pointer;
244  typedef value_type& reference;
245  typedef const value_type& const_reference;
246 
247  using iterator = typename __insert_base::iterator;
248 
249  using const_iterator = typename __insert_base::const_iterator;
250 
251  using local_iterator = __detail::_Local_iterator<key_type, _Value,
252  _ExtractKey, _Hash, _RangeHash, _Unused,
253  __constant_iterators::value,
254  __hash_cached::value>;
255 
256  using const_local_iterator = __detail::_Local_const_iterator<
257  key_type, _Value,
258  _ExtractKey, _Hash, _RangeHash, _Unused,
259  __constant_iterators::value, __hash_cached::value>;
260 
261  private:
262  using __rehash_type = _RehashPolicy;
263  using __rehash_state = typename __rehash_type::_State;
264 
265  using __unique_keys = typename __traits_type::__unique_keys;
266 
267  using __hashtable_base = __detail::
268  _Hashtable_base<_Key, _Value, _ExtractKey,
269  _Equal, _Hash, _RangeHash, _Unused, _Traits>;
270 
271  using __hash_code_base = typename __hashtable_base::__hash_code_base;
272  using __hash_code = typename __hashtable_base::__hash_code;
273  using __ireturn_type = typename __insert_base::__ireturn_type;
274 
275  using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
276  _Equal, _Hash, _RangeHash, _Unused,
277  _RehashPolicy, _Traits>;
278 
279  using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
280  _ExtractKey, _Equal,
281  _Hash, _RangeHash, _Unused,
282  _RehashPolicy, _Traits>;
283 
284  using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
285  _Equal, _Hash, _RangeHash, _Unused,
286  _RehashPolicy, _Traits>;
287 
288  using __reuse_or_alloc_node_gen_t =
289  __detail::_ReuseOrAllocNode<__node_alloc_type>;
290  using __alloc_node_gen_t =
291  __detail::_AllocNode<__node_alloc_type>;
292  using __node_builder_t =
293  __detail::_NodeBuilder<_ExtractKey>;
294 
295  // Simple RAII type for managing a node containing an element
296  struct _Scoped_node
297  {
298  // Take ownership of a node with a constructed element.
299  _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
300  : _M_h(__h), _M_node(__n) { }
301 
302  // Allocate a node and construct an element within it.
303  template<typename... _Args>
304  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
305  : _M_h(__h),
306  _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
307  { }
308 
309  // Destroy element and deallocate node.
310  ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
311 
312  _Scoped_node(const _Scoped_node&) = delete;
313  _Scoped_node& operator=(const _Scoped_node&) = delete;
314 
315  __hashtable_alloc* _M_h;
316  __node_ptr _M_node;
317  };
318 
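A standalone analogue of the _Scoped_node idea (illustration only, all names invented): own a freshly allocated node until it has been linked in, then give up ownership so the destructor becomes a no-op. This is the commit-or-rollback pattern this RAII type is designed for.

struct IntNode
{
  IntNode* next = nullptr;
  int value = 0;
};

struct ScopedIntNode
{
  IntNode* node;

  explicit
  ScopedIntNode(int v) : node(new IntNode) { node->value = v; }

  // Frees the node unless ownership has been released.
  ~ScopedIntNode() { delete node; }

  ScopedIntNode(const ScopedIntNode&) = delete;
  ScopedIntNode& operator=(const ScopedIntNode&) = delete;
};

void
push_front(IntNode*& head, int v)
{
  ScopedIntNode guard(v);   // anything throwing past this point is rolled back
  guard.node->next = head;  // linking itself cannot throw
  head = guard.node;
  guard.node = nullptr;     // committed: the destructor will not delete it
}

int main()
{
  IntNode* head = nullptr;
  push_front(head, 1);
  push_front(head, 2);
  while (head)              // tear the toy list down again
    {
      IntNode* n = head;
      head = head->next;
      delete n;
    }
}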
319  template<typename _Ht>
320  static constexpr
321  __conditional_t<std::is_lvalue_reference<_Ht>::value,
322  const value_type&, value_type&&>
323  __fwd_value_for(value_type& __val) noexcept
324  { return std::move(__val); }
325 
326  // Compile-time diagnostics.
327 
328  // _Hash_code_base has everything protected, so use this derived type to
329  // access it.
330  struct __hash_code_base_access : __hash_code_base
331  { using __hash_code_base::_M_bucket_index; };
332 
333  // To get bucket index we need _RangeHash not to throw.
334  static_assert(is_nothrow_default_constructible<_RangeHash>::value,
335  "Functor used to map hash code to bucket index"
336  " must be nothrow default constructible");
337  static_assert(noexcept(
338  std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
339  "Functor used to map hash code to bucket index must be"
340  " noexcept");
341 
 342  // To compute the bucket index we also need _ExtractKey not to throw.
343  static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
344  "_ExtractKey must be nothrow default constructible");
345  static_assert(noexcept(
346  std::declval<const _ExtractKey&>()(std::declval<_Value>())),
347  "_ExtractKey functor must be noexcept invocable");
348 
349  template<typename _Keya, typename _Valuea, typename _Alloca,
350  typename _ExtractKeya, typename _Equala,
351  typename _Hasha, typename _RangeHasha, typename _Unuseda,
352  typename _RehashPolicya, typename _Traitsa,
353  bool _Unique_keysa>
354  friend struct __detail::_Map_base;
355 
356  template<typename _Keya, typename _Valuea, typename _Alloca,
357  typename _ExtractKeya, typename _Equala,
358  typename _Hasha, typename _RangeHasha, typename _Unuseda,
359  typename _RehashPolicya, typename _Traitsa>
360  friend struct __detail::_Insert_base;
361 
362  template<typename _Keya, typename _Valuea, typename _Alloca,
363  typename _ExtractKeya, typename _Equala,
364  typename _Hasha, typename _RangeHasha, typename _Unuseda,
365  typename _RehashPolicya, typename _Traitsa,
366  bool _Constant_iteratorsa>
367  friend struct __detail::_Insert;
368 
369  template<typename _Keya, typename _Valuea, typename _Alloca,
370  typename _ExtractKeya, typename _Equala,
371  typename _Hasha, typename _RangeHasha, typename _Unuseda,
372  typename _RehashPolicya, typename _Traitsa,
373  bool _Unique_keysa>
374  friend struct __detail::_Equality;
375 
376  public:
377  using size_type = typename __hashtable_base::size_type;
378  using difference_type = typename __hashtable_base::difference_type;
379 
380 #if __cplusplus > 201402L
381  using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
382  using insert_return_type = _Node_insert_return<iterator, node_type>;
383 #endif
384 
385  private:
386  __buckets_ptr _M_buckets = &_M_single_bucket;
387  size_type _M_bucket_count = 1;
388  __node_base _M_before_begin;
389  size_type _M_element_count = 0;
390  _RehashPolicy _M_rehash_policy;
391 
 392  // A single bucket used when only one bucket is needed. Especially
 393  // useful for move semantics: the moved-from hashtable is left with this
 394  // single, non-allocated bucket, so those operations can be noexcept
 395  // qualified.
 396  // Note that we can't leave the hashtable with 0 buckets without adding
 397  // numerous checks in the code to avoid a modulus by 0.
398  __node_base_ptr _M_single_bucket = nullptr;
399 
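One observable payoff of the single-bucket scheme (true for this implementation; the standard itself does not require it): move construction never allocates, so it can be noexcept.

#include <type_traits>
#include <unordered_map>

static_assert(
  std::is_nothrow_move_constructible<std::unordered_map<int, int>>::value,
  "move construction only steals pointers; the moved-from map is left"
  " with the non-allocated single bucket");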
400  void
401  _M_update_bbegin()
402  {
403  if (_M_begin())
404  _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
405  }
406 
407  void
408  _M_update_bbegin(__node_ptr __n)
409  {
410  _M_before_begin._M_nxt = __n;
411  _M_update_bbegin();
412  }
413 
414  bool
415  _M_uses_single_bucket(__buckets_ptr __bkts) const
416  { return __builtin_expect(__bkts == &_M_single_bucket, false); }
417 
418  bool
419  _M_uses_single_bucket() const
420  { return _M_uses_single_bucket(_M_buckets); }
421 
422  __hashtable_alloc&
423  _M_base_alloc() { return *this; }
424 
425  __buckets_ptr
426  _M_allocate_buckets(size_type __bkt_count)
427  {
428  if (__builtin_expect(__bkt_count == 1, false))
429  {
430  _M_single_bucket = nullptr;
431  return &_M_single_bucket;
432  }
433 
434  return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
435  }
436 
437  void
438  _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
439  {
440  if (_M_uses_single_bucket(__bkts))
441  return;
442 
443  __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
444  }
445 
446  void
447  _M_deallocate_buckets()
448  { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
449 
450  // Gets bucket begin, deals with the fact that non-empty buckets contain
451  // their before begin node.
452  __node_ptr
453  _M_bucket_begin(size_type __bkt) const;
454 
455  __node_ptr
456  _M_begin() const
457  { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
458 
459  // Assign *this using another _Hashtable instance. Whether elements
460  // are copied or moved depends on the _Ht reference.
461  template<typename _Ht>
462  void
463  _M_assign_elements(_Ht&&);
464 
465  template<typename _Ht, typename _NodeGenerator>
466  void
467  _M_assign(_Ht&&, const _NodeGenerator&);
468 
469  void
470  _M_move_assign(_Hashtable&&, true_type);
471 
472  void
473  _M_move_assign(_Hashtable&&, false_type);
474 
475  void
476  _M_reset() noexcept;
477 
478  _Hashtable(const _Hash& __h, const _Equal& __eq,
479  const allocator_type& __a)
480  : __hashtable_base(__h, __eq),
481  __hashtable_alloc(__node_alloc_type(__a)),
482  __enable_default_ctor(_Enable_default_constructor_tag{})
483  { }
484 
485  template<bool _No_realloc = true>
486  static constexpr bool
487  _S_nothrow_move()
488  {
489 #if __cplusplus <= 201402L
490  return __and_<__bool_constant<_No_realloc>,
491  is_nothrow_copy_constructible<_Hash>,
492  is_nothrow_copy_constructible<_Equal>>::value;
493 #else
494  if constexpr (_No_realloc)
495  if constexpr (is_nothrow_copy_constructible<_Hash>())
496  return is_nothrow_copy_constructible<_Equal>();
497  return false;
498 #endif
499  }
500 
501  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
502  true_type /* alloc always equal */)
503  noexcept(_S_nothrow_move());
504 
505  _Hashtable(_Hashtable&&, __node_alloc_type&&,
506  false_type /* alloc always equal */);
507 
508  template<typename _InputIterator>
509  _Hashtable(_InputIterator __first, _InputIterator __last,
510  size_type __bkt_count_hint,
511  const _Hash&, const _Equal&, const allocator_type&,
512  true_type __uks);
513 
514  template<typename _InputIterator>
515  _Hashtable(_InputIterator __first, _InputIterator __last,
516  size_type __bkt_count_hint,
517  const _Hash&, const _Equal&, const allocator_type&,
518  false_type __uks);
519 
520  public:
521  // Constructor, destructor, assignment, swap
522  _Hashtable() = default;
523 
524  _Hashtable(const _Hashtable&);
525 
526  _Hashtable(const _Hashtable&, const allocator_type&);
527 
528  explicit
529  _Hashtable(size_type __bkt_count_hint,
530  const _Hash& __hf = _Hash(),
531  const key_equal& __eql = key_equal(),
532  const allocator_type& __a = allocator_type());
533 
534  // Use delegating constructors.
535  _Hashtable(_Hashtable&& __ht)
536  noexcept(_S_nothrow_move())
537  : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
538  true_type{})
539  { }
540 
541  _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
542  noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
543  : _Hashtable(std::move(__ht), __node_alloc_type(__a),
544  typename __node_alloc_traits::is_always_equal{})
545  { }
546 
547  explicit
548  _Hashtable(const allocator_type& __a)
549  : __hashtable_alloc(__node_alloc_type(__a)),
550  __enable_default_ctor(_Enable_default_constructor_tag{})
551  { }
552 
553  template<typename _InputIterator>
554  _Hashtable(_InputIterator __f, _InputIterator __l,
555  size_type __bkt_count_hint = 0,
556  const _Hash& __hf = _Hash(),
557  const key_equal& __eql = key_equal(),
558  const allocator_type& __a = allocator_type())
559  : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
560  __unique_keys{})
561  { }
562 
563  _Hashtable(initializer_list<value_type> __l,
564  size_type __bkt_count_hint = 0,
565  const _Hash& __hf = _Hash(),
566  const key_equal& __eql = key_equal(),
567  const allocator_type& __a = allocator_type())
568  : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
569  __hf, __eql, __a, __unique_keys{})
570  { }
571 
572  _Hashtable&
573  operator=(const _Hashtable& __ht);
574 
575  _Hashtable&
576  operator=(_Hashtable&& __ht)
577  noexcept(__node_alloc_traits::_S_nothrow_move()
578  && is_nothrow_move_assignable<_Hash>::value
579  && is_nothrow_move_assignable<_Equal>::value)
580  {
581  constexpr bool __move_storage =
582  __node_alloc_traits::_S_propagate_on_move_assign()
583  || __node_alloc_traits::_S_always_equal();
584  _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
585  return *this;
586  }
587 
588  _Hashtable&
589  operator=(initializer_list<value_type> __l)
590  {
591  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
592  _M_before_begin._M_nxt = nullptr;
593  clear();
594 
595  // We consider that all elements of __l are going to be inserted.
596  auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
597 
 598  // Do not shrink, to keep a potential user reservation.
599  if (_M_bucket_count < __l_bkt_count)
600  rehash(__l_bkt_count);
601 
602  this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
603  return *this;
604  }
605 
606  ~_Hashtable() noexcept;
607 
608  void
609  swap(_Hashtable&)
610  noexcept(__and_<__is_nothrow_swappable<_Hash>,
611  __is_nothrow_swappable<_Equal>>::value);
612 
613  // Basic container operations
614  iterator
615  begin() noexcept
616  { return iterator(_M_begin()); }
617 
618  const_iterator
619  begin() const noexcept
620  { return const_iterator(_M_begin()); }
621 
622  iterator
623  end() noexcept
624  { return iterator(nullptr); }
625 
626  const_iterator
627  end() const noexcept
628  { return const_iterator(nullptr); }
629 
630  const_iterator
631  cbegin() const noexcept
632  { return const_iterator(_M_begin()); }
633 
634  const_iterator
635  cend() const noexcept
636  { return const_iterator(nullptr); }
637 
638  size_type
639  size() const noexcept
640  { return _M_element_count; }
641 
642  _GLIBCXX_NODISCARD bool
643  empty() const noexcept
644  { return size() == 0; }
645 
646  allocator_type
647  get_allocator() const noexcept
648  { return allocator_type(this->_M_node_allocator()); }
649 
650  size_type
651  max_size() const noexcept
652  { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
653 
654  // Observers
655  key_equal
656  key_eq() const
657  { return this->_M_eq(); }
658 
659  // hash_function, if present, comes from _Hash_code_base.
660 
661  // Bucket operations
662  size_type
663  bucket_count() const noexcept
664  { return _M_bucket_count; }
665 
666  size_type
667  max_bucket_count() const noexcept
668  { return max_size(); }
669 
670  size_type
671  bucket_size(size_type __bkt) const
672  { return std::distance(begin(__bkt), end(__bkt)); }
673 
674  size_type
675  bucket(const key_type& __k) const
676  { return _M_bucket_index(this->_M_hash_code(__k)); }
677 
678  local_iterator
679  begin(size_type __bkt)
680  {
681  return local_iterator(*this, _M_bucket_begin(__bkt),
682  __bkt, _M_bucket_count);
683  }
684 
685  local_iterator
686  end(size_type __bkt)
687  { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
688 
689  const_local_iterator
690  begin(size_type __bkt) const
691  {
692  return const_local_iterator(*this, _M_bucket_begin(__bkt),
693  __bkt, _M_bucket_count);
694  }
695 
696  const_local_iterator
697  end(size_type __bkt) const
698  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
699 
700  // DR 691.
701  const_local_iterator
702  cbegin(size_type __bkt) const
703  {
704  return const_local_iterator(*this, _M_bucket_begin(__bkt),
705  __bkt, _M_bucket_count);
706  }
707 
708  const_local_iterator
709  cend(size_type __bkt) const
710  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
711 
712  float
713  load_factor() const noexcept
714  {
715  return static_cast<float>(size()) / static_cast<float>(bucket_count());
716  }
717 
718  // max_load_factor, if present, comes from _Rehash_base.
719 
720  // Generalization of max_load_factor. Extension, not found in
721  // TR1. Only useful if _RehashPolicy is something other than
722  // the default.
723  const _RehashPolicy&
724  __rehash_policy() const
725  { return _M_rehash_policy; }
726 
727  void
728  __rehash_policy(const _RehashPolicy& __pol)
729  { _M_rehash_policy = __pol; }
730 
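These accessors only matter when _Hashtable is instantiated with something other than the default _Prime_rehash_policy. A hypothetical policy that satisfies the contract documented at the top of this file (the three bucket-count members) plus the _State/_M_state/_M_reset members used by the assignment and rehash helpers below might look as follows; it is a sketch assuming a fixed load factor of 1.0, not a drop-in replacement for _Prime_rehash_policy:

#include <cstddef>
#include <utility>

struct PowerOfTwoRehashPolicy
{
  // The state is just the growth threshold; it is saved and restored around
  // operations that may fail (see _M_assign_elements).
  using _State = std::size_t;

  // Smallest power of two not less than n (and never less than 2).
  static std::size_t
  _M_next_bkt(std::size_t n)
  {
    std::size_t bkt = 2;
    while (bkt < n)
      bkt *= 2;
    return bkt;
  }

  // Bucket count suitable for n elements at a load factor of 1.0.
  static std::size_t
  _M_bkt_for_elements(std::size_t n)
  { return n; }

  // Grow when the element count would exceed the bucket count.
  std::pair<bool, std::size_t>
  _M_need_rehash(std::size_t n_bkt, std::size_t n_elt, std::size_t n_ins)
  {
    if (n_elt + n_ins > n_bkt)
      {
        _M_threshold = _M_next_bkt(n_elt + n_ins);
        return { true, _M_threshold };
      }
    return { false, 0 };
  }

  _State _M_state() const { return _M_threshold; }
  void _M_reset(_State s) { _M_threshold = s; }
  void _M_reset() { _M_threshold = 2; }

  std::size_t _M_threshold = 2;
};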
731  // Lookup.
732  iterator
733  find(const key_type& __k);
734 
735  const_iterator
736  find(const key_type& __k) const;
737 
738  size_type
739  count(const key_type& __k) const;
740 
 741  std::pair<iterator, iterator>
 742  equal_range(const key_type& __k);
 743 
 744  std::pair<const_iterator, const_iterator>
 745  equal_range(const key_type& __k) const;
746 
747 #if __cplusplus >= 202002L
748 #define __cpp_lib_generic_unordered_lookup 201811L
749 
750  template<typename _Kt,
751  typename = __has_is_transparent_t<_Hash, _Kt>,
752  typename = __has_is_transparent_t<_Equal, _Kt>>
753  iterator
754  _M_find_tr(const _Kt& __k);
755 
756  template<typename _Kt,
757  typename = __has_is_transparent_t<_Hash, _Kt>,
758  typename = __has_is_transparent_t<_Equal, _Kt>>
759  const_iterator
760  _M_find_tr(const _Kt& __k) const;
761 
762  template<typename _Kt,
763  typename = __has_is_transparent_t<_Hash, _Kt>,
764  typename = __has_is_transparent_t<_Equal, _Kt>>
765  size_type
766  _M_count_tr(const _Kt& __k) const;
767 
768  template<typename _Kt,
769  typename = __has_is_transparent_t<_Hash, _Kt>,
770  typename = __has_is_transparent_t<_Equal, _Kt>>
771  pair<iterator, iterator>
772  _M_equal_range_tr(const _Kt& __k);
773 
774  template<typename _Kt,
775  typename = __has_is_transparent_t<_Hash, _Kt>,
776  typename = __has_is_transparent_t<_Equal, _Kt>>
777  pair<const_iterator, const_iterator>
778  _M_equal_range_tr(const _Kt& __k) const;
779 #endif // C++20
780 
781  private:
782  // Bucket index computation helpers.
783  size_type
784  _M_bucket_index(const __node_value_type& __n) const noexcept
785  { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
786 
787  size_type
788  _M_bucket_index(__hash_code __c) const
789  { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
790 
791  // Find and insert helper functions and types
792  // Find the node before the one matching the criteria.
793  __node_base_ptr
794  _M_find_before_node(size_type, const key_type&, __hash_code) const;
795 
796  template<typename _Kt>
797  __node_base_ptr
798  _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
799 
800  __node_ptr
801  _M_find_node(size_type __bkt, const key_type& __key,
802  __hash_code __c) const
803  {
804  __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
805  if (__before_n)
806  return static_cast<__node_ptr>(__before_n->_M_nxt);
807  return nullptr;
808  }
809 
810  template<typename _Kt>
811  __node_ptr
812  _M_find_node_tr(size_type __bkt, const _Kt& __key,
813  __hash_code __c) const
814  {
815  auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
816  if (__before_n)
817  return static_cast<__node_ptr>(__before_n->_M_nxt);
818  return nullptr;
819  }
820 
821  // Insert a node at the beginning of a bucket.
822  void
823  _M_insert_bucket_begin(size_type, __node_ptr);
824 
 825  // Remove the bucket's first node.
826  void
827  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
828  size_type __next_bkt);
829 
830  // Get the node before __n in the bucket __bkt
831  __node_base_ptr
832  _M_get_previous_node(size_type __bkt, __node_ptr __n);
833 
834  // Insert node __n with hash code __code, in bucket __bkt if no
835  // rehash (assumes no element with same key already present).
836  // Takes ownership of __n if insertion succeeds, throws otherwise.
837  iterator
838  _M_insert_unique_node(size_type __bkt, __hash_code,
839  __node_ptr __n, size_type __n_elt = 1);
840 
841  // Insert node __n with key __k and hash code __code.
842  // Takes ownership of __n if insertion succeeds, throws otherwise.
843  iterator
844  _M_insert_multi_node(__node_ptr __hint,
845  __hash_code __code, __node_ptr __n);
846 
847  template<typename... _Args>
 848  __ireturn_type
 849  _M_emplace(true_type __uks, _Args&&... __args);
850 
851  template<typename... _Args>
852  iterator
853  _M_emplace(false_type __uks, _Args&&... __args)
854  { return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }
855 
856  // Emplace with hint, useless when keys are unique.
857  template<typename... _Args>
858  iterator
859  _M_emplace(const_iterator, true_type __uks, _Args&&... __args)
860  { return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }
861 
862  template<typename... _Args>
863  iterator
864  _M_emplace(const_iterator, false_type __uks, _Args&&... __args);
865 
866  template<typename _Kt, typename _Arg, typename _NodeGenerator>
 867  std::pair<iterator, bool>
 868  _M_insert_unique(_Kt&&, _Arg&&, const _NodeGenerator&);
869 
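      // _S_forward_key: when hashing the key_type cannot throw but hashing a
      // _Kt argument might, the overload below converts to key_type up front
      // so the hash code can later be computed without risk of throwing;
      // otherwise the argument is forwarded unchanged.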
870  template<typename _Kt>
871  static __conditional_t<
872  __and_<__is_nothrow_invocable<_Hash&, const key_type&>,
873  __not_<__is_nothrow_invocable<_Hash&, _Kt>>>::value,
874  key_type, _Kt&&>
875  _S_forward_key(_Kt&& __k)
876  { return std::forward<_Kt>(__k); }
877 
878  static const key_type&
879  _S_forward_key(const key_type& __k)
880  { return __k; }
881 
882  static key_type&&
883  _S_forward_key(key_type&& __k)
884  { return std::move(__k); }
885 
886  template<typename _Arg, typename _NodeGenerator>
 887  std::pair<iterator, bool>
 888  _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
889  true_type /* __uks */)
890  {
891  return _M_insert_unique(
892  _S_forward_key(_ExtractKey{}(std::forward<_Arg>(__arg))),
893  std::forward<_Arg>(__arg), __node_gen);
894  }
895 
896  template<typename _Arg, typename _NodeGenerator>
897  iterator
898  _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
899  false_type __uks)
900  {
901  return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
902  __uks);
903  }
904 
905  // Insert with hint, not used when keys are unique.
906  template<typename _Arg, typename _NodeGenerator>
907  iterator
908  _M_insert(const_iterator, _Arg&& __arg,
909  const _NodeGenerator& __node_gen, true_type __uks)
910  {
911  return
912  _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
913  }
914 
915  // Insert with hint when keys are not unique.
916  template<typename _Arg, typename _NodeGenerator>
917  iterator
918  _M_insert(const_iterator, _Arg&&,
919  const _NodeGenerator&, false_type __uks);
920 
921  size_type
922  _M_erase(true_type __uks, const key_type&);
923 
924  size_type
925  _M_erase(false_type __uks, const key_type&);
926 
927  iterator
928  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
929 
930  public:
931  // Emplace
932  template<typename... _Args>
933  __ireturn_type
934  emplace(_Args&&... __args)
935  { return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }
936 
937  template<typename... _Args>
938  iterator
939  emplace_hint(const_iterator __hint, _Args&&... __args)
940  {
941  return _M_emplace(__hint, __unique_keys{},
942  std::forward<_Args>(__args)...);
943  }
944 
945  // Insert member functions via inheritance.
946 
947  // Erase
948  iterator
949  erase(const_iterator);
950 
951  // LWG 2059.
952  iterator
953  erase(iterator __it)
954  { return erase(const_iterator(__it)); }
955 
956  size_type
957  erase(const key_type& __k)
958  { return _M_erase(__unique_keys{}, __k); }
959 
960  iterator
961  erase(const_iterator, const_iterator);
962 
963  void
964  clear() noexcept;
965 
 966  // Set the number of buckets, keeping it appropriate for the container's
 967  // number of elements.
968  void rehash(size_type __bkt_count);
969 
970  // DR 1189.
971  // reserve, if present, comes from _Rehash_base.
972 
973 #if __cplusplus > 201402L
974  /// Re-insert an extracted node into a container with unique keys.
975  insert_return_type
976  _M_reinsert_node(node_type&& __nh)
977  {
978  insert_return_type __ret;
979  if (__nh.empty())
980  __ret.position = end();
981  else
982  {
983  __glibcxx_assert(get_allocator() == __nh.get_allocator());
984 
985  const key_type& __k = __nh._M_key();
986  __hash_code __code = this->_M_hash_code(__k);
987  size_type __bkt = _M_bucket_index(__code);
988  if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
989  {
990  __ret.node = std::move(__nh);
991  __ret.position = iterator(__n);
992  __ret.inserted = false;
993  }
994  else
995  {
996  __ret.position
997  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
998  __nh._M_ptr = nullptr;
999  __ret.inserted = true;
1000  }
1001  }
1002  return __ret;
1003  }
1004 
1005  /// Re-insert an extracted node into a container with equivalent keys.
1006  iterator
1007  _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
1008  {
1009  if (__nh.empty())
1010  return end();
1011 
1012  __glibcxx_assert(get_allocator() == __nh.get_allocator());
1013 
1014  const key_type& __k = __nh._M_key();
1015  auto __code = this->_M_hash_code(__k);
1016  auto __ret
1017  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
1018  __nh._M_ptr = nullptr;
1019  return __ret;
1020  }
1021 
1022  private:
1023  node_type
1024  _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
1025  {
1026  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1027  if (__prev_n == _M_buckets[__bkt])
1028  _M_remove_bucket_begin(__bkt, __n->_M_next(),
1029  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1030  else if (__n->_M_nxt)
1031  {
1032  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1033  if (__next_bkt != __bkt)
1034  _M_buckets[__next_bkt] = __prev_n;
1035  }
1036 
1037  __prev_n->_M_nxt = __n->_M_nxt;
1038  __n->_M_nxt = nullptr;
1039  --_M_element_count;
1040  return { __n, this->_M_node_allocator() };
1041  }
1042 
1043  public:
1044  // Extract a node.
1045  node_type
1046  extract(const_iterator __pos)
1047  {
1048  size_t __bkt = _M_bucket_index(*__pos._M_cur);
1049  return _M_extract_node(__bkt,
1050  _M_get_previous_node(__bkt, __pos._M_cur));
1051  }
1052 
1053  /// Extract a node.
1054  node_type
1055  extract(const _Key& __k)
1056  {
1057  node_type __nh;
1058  __hash_code __code = this->_M_hash_code(__k);
1059  std::size_t __bkt = _M_bucket_index(__code);
1060  if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
1061  __nh = _M_extract_node(__bkt, __prev_node);
1062  return __nh;
1063  }
1064 
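At the public container level, the two extract overloads and _M_reinsert_node back the C++17 node-handle API; for example, with std::unordered_map (whose insert(node_type&&) forwards to _M_reinsert_node):

#include <cassert>
#include <string>
#include <unordered_map>
#include <utility>

int main()
{
  std::unordered_map<int, std::string> src{ {1, "one"}, {2, "two"} };
  std::unordered_map<int, std::string> dst;

  auto nh = src.extract(1);               // unlinks the node, no element copy
  assert(!nh.empty());
  nh.key() = 42;                          // the key is mutable via the handle
  auto res = dst.insert(std::move(nh));   // re-links the very same node
  assert(res.inserted);
  assert(res.position->first == 42 && res.position->second == "one");
}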
1065  /// Merge from a compatible container into one with unique keys.
1066  template<typename _Compatible_Hashtable>
1067  void
1068  _M_merge_unique(_Compatible_Hashtable& __src)
1069  {
1070  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1071  node_type>, "Node types are compatible");
1072  __glibcxx_assert(get_allocator() == __src.get_allocator());
1073 
1074  auto __n_elt = __src.size();
1075  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1076  {
1077  auto __pos = __i++;
1078  const key_type& __k = _ExtractKey{}(*__pos);
1079  __hash_code __code
1080  = this->_M_hash_code(__src.hash_function(), *__pos._M_cur);
1081  size_type __bkt = _M_bucket_index(__code);
1082  if (_M_find_node(__bkt, __k, __code) == nullptr)
1083  {
1084  auto __nh = __src.extract(__pos);
1085  _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
1086  __nh._M_ptr = nullptr;
1087  __n_elt = 1;
1088  }
1089  else if (__n_elt != 1)
1090  --__n_elt;
1091  }
1092  }
1093 
1094  /// Merge from a compatible container into one with equivalent keys.
1095  template<typename _Compatible_Hashtable>
1096  void
1097  _M_merge_multi(_Compatible_Hashtable& __src)
1098  {
1099  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1100  node_type>, "Node types are compatible");
1101  __glibcxx_assert(get_allocator() == __src.get_allocator());
1102 
1103  __node_ptr __hint = nullptr;
1104  this->reserve(size() + __src.size());
1105  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1106  {
1107  auto __pos = __i++;
1108  __hash_code __code
1109  = this->_M_hash_code(__src.hash_function(), *__pos._M_cur);
1110  auto __nh = __src.extract(__pos);
1111  __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
1112  __nh._M_ptr = nullptr;
1113  }
1114  }
1115 #endif // C++17
1116 
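Likewise, std::unordered_map::merge forwards to _M_merge_unique (the multi containers use _M_merge_multi): nodes are re-linked without copying, and a node whose key already exists in the destination stays behind in the source. A short usage sketch:

#include <cassert>
#include <string>
#include <unordered_map>

int main()
{
  std::unordered_map<int, std::string> dst{ {1, "one"} };
  std::unordered_map<int, std::string> src{ {1, "uno"}, {2, "dos"} };

  dst.merge(src);   // node 2 moves over; node 1 collides and stays in src

  assert(dst.size() == 2 && dst.at(1) == "one" && dst.at(2) == "dos");
  assert(src.size() == 1 && src.at(1) == "uno");
}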
1117  private:
1118  // Helper rehash method used when keys are unique.
1119  void _M_rehash_aux(size_type __bkt_count, true_type __uks);
1120 
1121  // Helper rehash method used when keys can be non-unique.
1122  void _M_rehash_aux(size_type __bkt_count, false_type __uks);
1123 
 1124  // Unconditionally change the size of the bucket array to __bkt_count,
 1125  // restoring the hash policy state to __state on exception.
1126  void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
1127  };
1128 
1129 
1130  // Definitions of class template _Hashtable's out-of-line member functions.
1131  template<typename _Key, typename _Value, typename _Alloc,
1132  typename _ExtractKey, typename _Equal,
1133  typename _Hash, typename _RangeHash, typename _Unused,
1134  typename _RehashPolicy, typename _Traits>
1135  auto
1136  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1137  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1138  _M_bucket_begin(size_type __bkt) const
1139  -> __node_ptr
1140  {
1141  __node_base_ptr __n = _M_buckets[__bkt];
1142  return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
1143  }
1144 
1145  template<typename _Key, typename _Value, typename _Alloc,
1146  typename _ExtractKey, typename _Equal,
1147  typename _Hash, typename _RangeHash, typename _Unused,
1148  typename _RehashPolicy, typename _Traits>
1149  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1150  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1151  _Hashtable(size_type __bkt_count_hint,
1152  const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1153  : _Hashtable(__h, __eq, __a)
1154  {
1155  auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1156  if (__bkt_count > _M_bucket_count)
1157  {
1158  _M_buckets = _M_allocate_buckets(__bkt_count);
1159  _M_bucket_count = __bkt_count;
1160  }
1161  }
1162 
1163  template<typename _Key, typename _Value, typename _Alloc,
1164  typename _ExtractKey, typename _Equal,
1165  typename _Hash, typename _RangeHash, typename _Unused,
1166  typename _RehashPolicy, typename _Traits>
1167  template<typename _InputIterator>
1168  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1169  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1170  _Hashtable(_InputIterator __f, _InputIterator __l,
1171  size_type __bkt_count_hint,
1172  const _Hash& __h, const _Equal& __eq,
1173  const allocator_type& __a, true_type /* __uks */)
1174  : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1175  {
1176  for (; __f != __l; ++__f)
1177  this->insert(*__f);
1178  }
1179 
1180  template<typename _Key, typename _Value, typename _Alloc,
1181  typename _ExtractKey, typename _Equal,
1182  typename _Hash, typename _RangeHash, typename _Unused,
1183  typename _RehashPolicy, typename _Traits>
1184  template<typename _InputIterator>
1185  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1186  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1187  _Hashtable(_InputIterator __f, _InputIterator __l,
1188  size_type __bkt_count_hint,
1189  const _Hash& __h, const _Equal& __eq,
1190  const allocator_type& __a, false_type /* __uks */)
1191  : _Hashtable(__h, __eq, __a)
1192  {
1193  auto __nb_elems = __detail::__distance_fw(__f, __l);
1194  auto __bkt_count =
1195  _M_rehash_policy._M_next_bkt(
1196  std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1197  __bkt_count_hint));
1198 
1199  if (__bkt_count > _M_bucket_count)
1200  {
1201  _M_buckets = _M_allocate_buckets(__bkt_count);
1202  _M_bucket_count = __bkt_count;
1203  }
1204 
1205  for (; __f != __l; ++__f)
1206  this->insert(*__f);
1207  }
1208 
1209  template<typename _Key, typename _Value, typename _Alloc,
1210  typename _ExtractKey, typename _Equal,
1211  typename _Hash, typename _RangeHash, typename _Unused,
1212  typename _RehashPolicy, typename _Traits>
1213  auto
1214  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1215  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1216  operator=(const _Hashtable& __ht)
1217  -> _Hashtable&
1218  {
1219  if (&__ht == this)
1220  return *this;
1221 
1222  if (__node_alloc_traits::_S_propagate_on_copy_assign())
1223  {
1224  auto& __this_alloc = this->_M_node_allocator();
1225  auto& __that_alloc = __ht._M_node_allocator();
1226  if (!__node_alloc_traits::_S_always_equal()
1227  && __this_alloc != __that_alloc)
1228  {
1229  // Replacement allocator cannot free existing storage.
1230  this->_M_deallocate_nodes(_M_begin());
1231  _M_before_begin._M_nxt = nullptr;
1232  _M_deallocate_buckets();
1233  _M_buckets = nullptr;
1234  std::__alloc_on_copy(__this_alloc, __that_alloc);
1235  __hashtable_base::operator=(__ht);
1236  _M_bucket_count = __ht._M_bucket_count;
1237  _M_element_count = __ht._M_element_count;
1238  _M_rehash_policy = __ht._M_rehash_policy;
1239  __alloc_node_gen_t __alloc_node_gen(*this);
1240  __try
1241  {
1242  _M_assign(__ht, __alloc_node_gen);
1243  }
1244  __catch(...)
1245  {
1246  // _M_assign took care of deallocating all memory. Now we
1247  // must make sure this instance remains in a usable state.
1248  _M_reset();
1249  __throw_exception_again;
1250  }
1251  return *this;
1252  }
1253  std::__alloc_on_copy(__this_alloc, __that_alloc);
1254  }
1255 
1256  // Reuse allocated buckets and nodes.
1257  _M_assign_elements(__ht);
1258  return *this;
1259  }
1260 
1261  template<typename _Key, typename _Value, typename _Alloc,
1262  typename _ExtractKey, typename _Equal,
1263  typename _Hash, typename _RangeHash, typename _Unused,
1264  typename _RehashPolicy, typename _Traits>
1265  template<typename _Ht>
1266  void
1267  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1268  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1269  _M_assign_elements(_Ht&& __ht)
1270  {
1271  __buckets_ptr __former_buckets = nullptr;
1272  std::size_t __former_bucket_count = _M_bucket_count;
1273  const __rehash_state& __former_state = _M_rehash_policy._M_state();
1274 
1275  if (_M_bucket_count != __ht._M_bucket_count)
1276  {
1277  __former_buckets = _M_buckets;
1278  _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1279  _M_bucket_count = __ht._M_bucket_count;
1280  }
1281  else
1282  __builtin_memset(_M_buckets, 0,
1283  _M_bucket_count * sizeof(__node_base_ptr));
1284 
1285  __try
1286  {
1287  __hashtable_base::operator=(std::forward<_Ht>(__ht));
1288  _M_element_count = __ht._M_element_count;
1289  _M_rehash_policy = __ht._M_rehash_policy;
1290  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1291  _M_before_begin._M_nxt = nullptr;
1292  _M_assign(std::forward<_Ht>(__ht), __roan);
1293  if (__former_buckets)
1294  _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1295  }
1296  __catch(...)
1297  {
1298  if (__former_buckets)
1299  {
1300  // Restore previous buckets.
1301  _M_deallocate_buckets();
1302  _M_rehash_policy._M_reset(__former_state);
1303  _M_buckets = __former_buckets;
1304  _M_bucket_count = __former_bucket_count;
1305  }
1306  __builtin_memset(_M_buckets, 0,
1307  _M_bucket_count * sizeof(__node_base_ptr));
1308  __throw_exception_again;
1309  }
1310  }
1311 
1312  template<typename _Key, typename _Value, typename _Alloc,
1313  typename _ExtractKey, typename _Equal,
1314  typename _Hash, typename _RangeHash, typename _Unused,
1315  typename _RehashPolicy, typename _Traits>
1316  template<typename _Ht, typename _NodeGenerator>
1317  void
1318  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1319  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1320  _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
1321  {
1322  __buckets_ptr __buckets = nullptr;
1323  if (!_M_buckets)
1324  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
1325 
1326  __try
1327  {
1328  if (!__ht._M_before_begin._M_nxt)
1329  return;
1330 
1331  // First deal with the special first node pointed to by
1332  // _M_before_begin.
1333  __node_ptr __ht_n = __ht._M_begin();
1334  __node_ptr __this_n
1335  = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1336  this->_M_copy_code(*__this_n, *__ht_n);
1337  _M_update_bbegin(__this_n);
1338 
1339  // Then deal with other nodes.
1340  __node_ptr __prev_n = __this_n;
1341  for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1342  {
1343  __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1344  __prev_n->_M_nxt = __this_n;
1345  this->_M_copy_code(*__this_n, *__ht_n);
1346  size_type __bkt = _M_bucket_index(*__this_n);
1347  if (!_M_buckets[__bkt])
1348  _M_buckets[__bkt] = __prev_n;
1349  __prev_n = __this_n;
1350  }
1351  }
1352  __catch(...)
1353  {
1354  clear();
1355  if (__buckets)
1356  _M_deallocate_buckets();
1357  __throw_exception_again;
1358  }
1359  }
1360 
1361  template<typename _Key, typename _Value, typename _Alloc,
1362  typename _ExtractKey, typename _Equal,
1363  typename _Hash, typename _RangeHash, typename _Unused,
1364  typename _RehashPolicy, typename _Traits>
1365  void
1366  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1367  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1368  _M_reset() noexcept
1369  {
1370  _M_rehash_policy._M_reset();
1371  _M_bucket_count = 1;
1372  _M_single_bucket = nullptr;
1373  _M_buckets = &_M_single_bucket;
1374  _M_before_begin._M_nxt = nullptr;
1375  _M_element_count = 0;
1376  }
1377 
1378  template<typename _Key, typename _Value, typename _Alloc,
1379  typename _ExtractKey, typename _Equal,
1380  typename _Hash, typename _RangeHash, typename _Unused,
1381  typename _RehashPolicy, typename _Traits>
1382  void
1383  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1384  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1385  _M_move_assign(_Hashtable&& __ht, true_type)
1386  {
1387  if (__builtin_expect(std::__addressof(__ht) == this, false))
1388  return;
1389 
1390  this->_M_deallocate_nodes(_M_begin());
1391  _M_deallocate_buckets();
1392  __hashtable_base::operator=(std::move(__ht));
1393  _M_rehash_policy = __ht._M_rehash_policy;
1394  if (!__ht._M_uses_single_bucket())
1395  _M_buckets = __ht._M_buckets;
1396  else
1397  {
1398  _M_buckets = &_M_single_bucket;
1399  _M_single_bucket = __ht._M_single_bucket;
1400  }
1401 
1402  _M_bucket_count = __ht._M_bucket_count;
1403  _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1404  _M_element_count = __ht._M_element_count;
1405  std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1406 
1407  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1408  _M_update_bbegin();
1409  __ht._M_reset();
1410  }
1411 
1412  template<typename _Key, typename _Value, typename _Alloc,
1413  typename _ExtractKey, typename _Equal,
1414  typename _Hash, typename _RangeHash, typename _Unused,
1415  typename _RehashPolicy, typename _Traits>
1416  void
1417  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1418  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1419  _M_move_assign(_Hashtable&& __ht, false_type)
1420  {
1421  if (__ht._M_node_allocator() == this->_M_node_allocator())
1422  _M_move_assign(std::move(__ht), true_type{});
1423  else
1424  {
 1425  // Can't move the memory, so move the elements instead.
1426  _M_assign_elements(std::move(__ht));
1427  __ht.clear();
1428  }
1429  }
1430 
1431  template<typename _Key, typename _Value, typename _Alloc,
1432  typename _ExtractKey, typename _Equal,
1433  typename _Hash, typename _RangeHash, typename _Unused,
1434  typename _RehashPolicy, typename _Traits>
1435  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1436  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1437  _Hashtable(const _Hashtable& __ht)
1438  : __hashtable_base(__ht),
1439  __map_base(__ht),
1440  __rehash_base(__ht),
1441  __hashtable_alloc(
1442  __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1443  __enable_default_ctor(__ht),
1444  _M_buckets(nullptr),
1445  _M_bucket_count(__ht._M_bucket_count),
1446  _M_element_count(__ht._M_element_count),
1447  _M_rehash_policy(__ht._M_rehash_policy)
1448  {
1449  __alloc_node_gen_t __alloc_node_gen(*this);
1450  _M_assign(__ht, __alloc_node_gen);
1451  }
1452 
1453  template<typename _Key, typename _Value, typename _Alloc,
1454  typename _ExtractKey, typename _Equal,
1455  typename _Hash, typename _RangeHash, typename _Unused,
1456  typename _RehashPolicy, typename _Traits>
1457  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1458  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1459  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1460  true_type /* alloc always equal */)
1461  noexcept(_S_nothrow_move())
1462  : __hashtable_base(__ht),
1463  __map_base(__ht),
1464  __rehash_base(__ht),
1465  __hashtable_alloc(std::move(__a)),
1466  __enable_default_ctor(__ht),
1467  _M_buckets(__ht._M_buckets),
1468  _M_bucket_count(__ht._M_bucket_count),
1469  _M_before_begin(__ht._M_before_begin._M_nxt),
1470  _M_element_count(__ht._M_element_count),
1471  _M_rehash_policy(__ht._M_rehash_policy)
1472  {
1473  // Update buckets if __ht is using its single bucket.
1474  if (__ht._M_uses_single_bucket())
1475  {
1476  _M_buckets = &_M_single_bucket;
1477  _M_single_bucket = __ht._M_single_bucket;
1478  }
1479 
1480  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1481  _M_update_bbegin();
1482 
1483  __ht._M_reset();
1484  }
1485 
1486  template<typename _Key, typename _Value, typename _Alloc,
1487  typename _ExtractKey, typename _Equal,
1488  typename _Hash, typename _RangeHash, typename _Unused,
1489  typename _RehashPolicy, typename _Traits>
1490  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1491  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1492  _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1493  : __hashtable_base(__ht),
1494  __map_base(__ht),
1495  __rehash_base(__ht),
1496  __hashtable_alloc(__node_alloc_type(__a)),
1497  __enable_default_ctor(__ht),
1498  _M_buckets(),
1499  _M_bucket_count(__ht._M_bucket_count),
1500  _M_element_count(__ht._M_element_count),
1501  _M_rehash_policy(__ht._M_rehash_policy)
1502  {
1503  __alloc_node_gen_t __alloc_node_gen(*this);
1504  _M_assign(__ht, __alloc_node_gen);
1505  }
1506 
1507  template<typename _Key, typename _Value, typename _Alloc,
1508  typename _ExtractKey, typename _Equal,
1509  typename _Hash, typename _RangeHash, typename _Unused,
1510  typename _RehashPolicy, typename _Traits>
1511  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1512  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1513  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1514  false_type /* alloc always equal */)
1515  : __hashtable_base(__ht),
1516  __map_base(__ht),
1517  __rehash_base(__ht),
1518  __hashtable_alloc(std::move(__a)),
1519  __enable_default_ctor(__ht),
1520  _M_buckets(nullptr),
1521  _M_bucket_count(__ht._M_bucket_count),
1522  _M_element_count(__ht._M_element_count),
1523  _M_rehash_policy(__ht._M_rehash_policy)
1524  {
1525  if (__ht._M_node_allocator() == this->_M_node_allocator())
1526  {
1527  if (__ht._M_uses_single_bucket())
1528  {
1529  _M_buckets = &_M_single_bucket;
1530  _M_single_bucket = __ht._M_single_bucket;
1531  }
1532  else
1533  _M_buckets = __ht._M_buckets;
1534 
1535  // Fix bucket containing the _M_before_begin pointer that can't be
1536  // moved.
1537  _M_update_bbegin(__ht._M_begin());
1538 
1539  __ht._M_reset();
1540  }
1541  else
1542  {
1543  __alloc_node_gen_t __alloc_gen(*this);
1544 
1545  using _Fwd_Ht = __conditional_t<
1546  __move_if_noexcept_cond<value_type>::value,
1547  const _Hashtable&, _Hashtable&&>;
1548  _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
1549  __ht.clear();
1550  }
1551  }
1552 
1553  template<typename _Key, typename _Value, typename _Alloc,
1554  typename _ExtractKey, typename _Equal,
1555  typename _Hash, typename _RangeHash, typename _Unused,
1556  typename _RehashPolicy, typename _Traits>
1557  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1558  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1559  ~_Hashtable() noexcept
1560  {
1561  // Getting a bucket index from a node shall not throw because it is used
1562  // in methods (erase, swap...) that shall not throw. Need a complete
1563  // type to check this, so do it in the destructor not at class scope.
1564  static_assert(noexcept(declval<const __hash_code_base_access&>()
1565  ._M_bucket_index(declval<const __node_value_type&>(),
1566  (std::size_t)0)),
1567  "Cache the hash code or qualify your functors involved"
1568  " in hash code and bucket index computation with noexcept");
1569 
1570  clear();
1571  _M_deallocate_buckets();
1572  }
1573 
1574  template<typename _Key, typename _Value, typename _Alloc,
1575  typename _ExtractKey, typename _Equal,
1576  typename _Hash, typename _RangeHash, typename _Unused,
1577  typename _RehashPolicy, typename _Traits>
1578  void
1579  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1580  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1581  swap(_Hashtable& __x)
1582  noexcept(__and_<__is_nothrow_swappable<_Hash>,
1583  __is_nothrow_swappable<_Equal>>::value)
1584  {
1585  // The only base class with member variables is hash_code_base.
1586  // We define _Hash_code_base::_M_swap because different
1587  // specializations have different members.
1588  this->_M_swap(__x);
1589 
1590  std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
1591  std::swap(_M_rehash_policy, __x._M_rehash_policy);
1592 
1593  // Deal properly with potentially moved instances.
1594  if (this->_M_uses_single_bucket())
1595  {
1596  if (!__x._M_uses_single_bucket())
1597  {
1598  _M_buckets = __x._M_buckets;
1599  __x._M_buckets = &__x._M_single_bucket;
1600  }
1601  }
1602  else if (__x._M_uses_single_bucket())
1603  {
1604  __x._M_buckets = _M_buckets;
1605  _M_buckets = &_M_single_bucket;
1606  }
1607  else
1608  std::swap(_M_buckets, __x._M_buckets);
1609 
1610  std::swap(_M_bucket_count, __x._M_bucket_count);
1611  std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1612  std::swap(_M_element_count, __x._M_element_count);
1613  std::swap(_M_single_bucket, __x._M_single_bucket);
1614 
1615  // Fix buckets containing the _M_before_begin pointers that can't be
1616  // swapped.
1617  _M_update_bbegin();
1618  __x._M_update_bbegin();
1619  }
1620 
1621  template<typename _Key, typename _Value, typename _Alloc,
1622  typename _ExtractKey, typename _Equal,
1623  typename _Hash, typename _RangeHash, typename _Unused,
1624  typename _RehashPolicy, typename _Traits>
1625  auto
1626  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1627  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1628  find(const key_type& __k)
1629  -> iterator
1630  {
1631  __hash_code __code = this->_M_hash_code(__k);
1632  std::size_t __bkt = _M_bucket_index(__code);
1633  return iterator(_M_find_node(__bkt, __k, __code));
1634  }
1635 
1636  template<typename _Key, typename _Value, typename _Alloc,
1637  typename _ExtractKey, typename _Equal,
1638  typename _Hash, typename _RangeHash, typename _Unused,
1639  typename _RehashPolicy, typename _Traits>
1640  auto
1641  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1642  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1643  find(const key_type& __k) const
1644  -> const_iterator
1645  {
1646  __hash_code __code = this->_M_hash_code(__k);
1647  std::size_t __bkt = _M_bucket_index(__code);
1648  return const_iterator(_M_find_node(__bkt, __k, __code));
1649  }
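Both find overloads hash the key once, map the hash code to a bucket and scan only that bucket. Through the public API this is the familiar lookup, sketched here with arbitrary data:

#include <cassert>
#include <string>
#include <unordered_map>

int main()
{
  std::unordered_map<std::string, int> m{{"a", 1}, {"b", 2}};

  auto hit = m.find("a");
  assert(hit != m.end() && hit->second == 1);

  auto miss = m.find("z");   // absent key: end() is returned
  assert(miss == m.end());
}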
1650 
1651 #if __cplusplus > 201703L
1652  template<typename _Key, typename _Value, typename _Alloc,
1653  typename _ExtractKey, typename _Equal,
1654  typename _Hash, typename _RangeHash, typename _Unused,
1655  typename _RehashPolicy, typename _Traits>
1656  template<typename _Kt, typename, typename>
1657  auto
1658  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1659  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1660  _M_find_tr(const _Kt& __k)
1661  -> iterator
1662  {
1663  __hash_code __code = this->_M_hash_code_tr(__k);
1664  std::size_t __bkt = _M_bucket_index(__code);
1665  return iterator(_M_find_node_tr(__bkt, __k, __code));
1666  }
1667 
1668  template<typename _Key, typename _Value, typename _Alloc,
1669  typename _ExtractKey, typename _Equal,
1670  typename _Hash, typename _RangeHash, typename _Unused,
1671  typename _RehashPolicy, typename _Traits>
1672  template<typename _Kt, typename, typename>
1673  auto
1674  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1675  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1676  _M_find_tr(const _Kt& __k) const
1677  -> const_iterator
1678  {
1679  __hash_code __code = this->_M_hash_code_tr(__k);
1680  std::size_t __bkt = _M_bucket_index(__code);
1681  return const_iterator(_M_find_node_tr(__bkt, __k, __code));
1682  }
1683 #endif
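The _M_find_tr overloads back C++20 heterogeneous lookup, which is enabled when both the hasher and the equality predicate expose an is_transparent member type. A sketch assuming C++20; StringHash is an illustrative transparent hasher, not a library type:

#include <cassert>
#include <functional>
#include <string>
#include <string_view>
#include <unordered_map>

// The is_transparent member opts into heterogeneous lookup.
struct StringHash
{
  using is_transparent = void;
  std::size_t operator()(std::string_view s) const noexcept
  { return std::hash<std::string_view>{}(s); }
};

int main()
{
  std::unordered_map<std::string, int, StringHash, std::equal_to<>> m;
  m.emplace("key", 42);

  std::string_view sv = "key";
  auto it = m.find(sv);      // no temporary std::string is created
  assert(it != m.end() && it->second == 42);
}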
1684 
1685  template<typename _Key, typename _Value, typename _Alloc,
1686  typename _ExtractKey, typename _Equal,
1687  typename _Hash, typename _RangeHash, typename _Unused,
1688  typename _RehashPolicy, typename _Traits>
1689  auto
1690  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1691  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1692  count(const key_type& __k) const
1693  -> size_type
1694  {
1695  auto __it = find(__k);
1696  if (!__it._M_cur)
1697  return 0;
1698 
1699  if (__unique_keys::value)
1700  return 1;
1701 
1702  // All equivalent values are next to each other; once we find a
1703  // non-equivalent value after an equivalent one, we know there are
1704  // no further equivalent values.
1705  size_type __result = 1;
1706  for (auto __ref = __it++;
1707  __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
1708  ++__it)
1709  ++__result;
1710 
1711  return __result;
1712  }
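The loop in count relies on equivalent elements being adjacent in the iteration order, which the standard guarantees for the unordered multi containers. Observed through the public API:

#include <cassert>
#include <unordered_set>

int main()
{
  std::unordered_multiset<int> ms{1, 7, 7, 7, 9};

  // count() finds the first element equal to 7 and then walks the
  // adjacent equivalent elements.
  assert(ms.count(7) == 3);
  assert(ms.count(4) == 0);
}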
1713 
1714 #if __cplusplus > 201703L
1715  template<typename _Key, typename _Value, typename _Alloc,
1716  typename _ExtractKey, typename _Equal,
1717  typename _Hash, typename _RangeHash, typename _Unused,
1718  typename _RehashPolicy, typename _Traits>
1719  template<typename _Kt, typename, typename>
1720  auto
1721  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1722  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1723  _M_count_tr(const _Kt& __k) const
1724  -> size_type
1725  {
1726  __hash_code __code = this->_M_hash_code_tr(__k);
1727  std::size_t __bkt = _M_bucket_index(__code);
1728  auto __n = _M_find_node_tr(__bkt, __k, __code);
1729  if (!__n)
1730  return 0;
1731 
1732  // All equivalent values are next to each other; once we find a
1733  // non-equivalent value after an equivalent one, we know there are
1734  // no further equivalent values.
1735  iterator __it(__n);
1736  size_type __result = 1;
1737  for (++__it;
1738  __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
1739  ++__it)
1740  ++__result;
1741 
1742  return __result;
1743  }
1744 #endif
1745 
1746  template<typename _Key, typename _Value, typename _Alloc,
1747  typename _ExtractKey, typename _Equal,
1748  typename _Hash, typename _RangeHash, typename _Unused,
1749  typename _RehashPolicy, typename _Traits>
1750  auto
1751  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1752  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1753  equal_range(const key_type& __k)
1754  -> pair<iterator, iterator>
1755  {
1756  auto __ite = find(__k);
1757  if (!__ite._M_cur)
1758  return { __ite, __ite };
1759 
1760  auto __beg = __ite++;
1761  if (__unique_keys::value)
1762  return { __beg, __ite };
1763 
1764  // All equivalent values are next to each other; once we find a
1765  // non-equivalent value after an equivalent one, we know there are
1766  // no further equivalent values.
1767  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1768  ++__ite;
1769 
1770  return { __beg, __ite };
1771  }
1772 
1773  template<typename _Key, typename _Value, typename _Alloc,
1774  typename _ExtractKey, typename _Equal,
1775  typename _Hash, typename _RangeHash, typename _Unused,
1776  typename _RehashPolicy, typename _Traits>
1777  auto
1778  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1779  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1780  equal_range(const key_type& __k) const
1781  -> pair<const_iterator, const_iterator>
1782  {
1783  auto __ite = find(__k);
1784  if (!__ite._M_cur)
1785  return { __ite, __ite };
1786 
1787  auto __beg = __ite++;
1788  if (__unique_keys::value)
1789  return { __beg, __ite };
1790 
1791  // All equivalent values are next to each other; once we find a
1792  // non-equivalent value after an equivalent one, we know there are
1793  // no further equivalent values.
1794  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1795  ++__ite;
1796 
1797  return { __beg, __ite };
1798  }
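equal_range returns that same contiguous run of equivalent elements as an iterator pair, for example:

#include <cassert>
#include <iterator>
#include <string>
#include <unordered_map>

int main()
{
  std::unordered_multimap<std::string, int> mm{
    {"x", 1}, {"x", 2}, {"y", 3}};

  auto [first, last] = mm.equal_range("x");
  assert(std::distance(first, last) == 2);   // both "x" mappings

  auto [f2, l2] = mm.equal_range("absent");
  assert(f2 == l2);                          // empty range for a missing key
}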
1799 
1800 #if __cplusplus > 201703L
1801  template<typename _Key, typename _Value, typename _Alloc,
1802  typename _ExtractKey, typename _Equal,
1803  typename _Hash, typename _RangeHash, typename _Unused,
1804  typename _RehashPolicy, typename _Traits>
1805  template<typename _Kt, typename, typename>
1806  auto
1807  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1808  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1809  _M_equal_range_tr(const _Kt& __k)
1810  -> pair<iterator, iterator>
1811  {
1812  __hash_code __code = this->_M_hash_code_tr(__k);
1813  std::size_t __bkt = _M_bucket_index(__code);
1814  auto __n = _M_find_node_tr(__bkt, __k, __code);
1815  iterator __ite(__n);
1816  if (!__n)
1817  return { __ite, __ite };
1818 
1819  // All equivalent values are next to each other; once we find a
1820  // non-equivalent value after an equivalent one, we know there are
1821  // no further equivalent values.
1822  auto __beg = __ite++;
1823  while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1824  ++__ite;
1825 
1826  return { __beg, __ite };
1827  }
1828 
1829  template<typename _Key, typename _Value, typename _Alloc,
1830  typename _ExtractKey, typename _Equal,
1831  typename _Hash, typename _RangeHash, typename _Unused,
1832  typename _RehashPolicy, typename _Traits>
1833  template<typename _Kt, typename, typename>
1834  auto
1835  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1836  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1837  _M_equal_range_tr(const _Kt& __k) const
1838  -> pair<const_iterator, const_iterator>
1839  {
1840  __hash_code __code = this->_M_hash_code_tr(__k);
1841  std::size_t __bkt = _M_bucket_index(__code);
1842  auto __n = _M_find_node_tr(__bkt, __k, __code);
1843  const_iterator __ite(__n);
1844  if (!__n)
1845  return { __ite, __ite };
1846 
1847  // All equivalent values are next to each other; once we find a
1848  // non-equivalent value after an equivalent one, we know there are
1849  // no further equivalent values.
1850  auto __beg = __ite++;
1851  while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1852  ++__ite;
1853 
1854  return { __beg, __ite };
1855  }
1856 #endif
1857 
1858  // Find the node before the one whose key compares equal to k in the bucket
1859  // bkt. Return nullptr if no node is found.
1860  template<typename _Key, typename _Value, typename _Alloc,
1861  typename _ExtractKey, typename _Equal,
1862  typename _Hash, typename _RangeHash, typename _Unused,
1863  typename _RehashPolicy, typename _Traits>
1864  auto
1865  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1866  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1867  _M_find_before_node(size_type __bkt, const key_type& __k,
1868  __hash_code __code) const
1869  -> __node_base_ptr
1870  {
1871  __node_base_ptr __prev_p = _M_buckets[__bkt];
1872  if (!__prev_p)
1873  return nullptr;
1874 
1875  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1876  __p = __p->_M_next())
1877  {
1878  if (this->_M_equals(__k, __code, *__p))
1879  return __prev_p;
1880 
1881  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1882  break;
1883  __prev_p = __p;
1884  }
1885 
1886  return nullptr;
1887  }
1888 
1889  template<typename _Key, typename _Value, typename _Alloc,
1890  typename _ExtractKey, typename _Equal,
1891  typename _Hash, typename _RangeHash, typename _Unused,
1892  typename _RehashPolicy, typename _Traits>
1893  template<typename _Kt>
1894  auto
1895  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1896  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1897  _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
1898  __hash_code __code) const
1899  -> __node_base_ptr
1900  {
1901  __node_base_ptr __prev_p = _M_buckets[__bkt];
1902  if (!__prev_p)
1903  return nullptr;
1904 
1905  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1906  __p = __p->_M_next())
1907  {
1908  if (this->_M_equals_tr(__k, __code, *__p))
1909  return __prev_p;
1910 
1911  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1912  break;
1913  __prev_p = __p;
1914  }
1915 
1916  return nullptr;
1917  }
1918 
1919  template<typename _Key, typename _Value, typename _Alloc,
1920  typename _ExtractKey, typename _Equal,
1921  typename _Hash, typename _RangeHash, typename _Unused,
1922  typename _RehashPolicy, typename _Traits>
1923  void
1924  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1925  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1926  _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
1927  {
1928  if (_M_buckets[__bkt])
1929  {
1930  // The bucket is not empty; we just need to insert the new node
1931  // after the bucket's before-begin node.
1932  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
1933  _M_buckets[__bkt]->_M_nxt = __node;
1934  }
1935  else
1936  {
1937  // The bucket is empty; the new node is inserted at the
1938  // beginning of the singly-linked list, and the bucket will
1939  // point to _M_before_begin.
1940  __node->_M_nxt = _M_before_begin._M_nxt;
1941  _M_before_begin._M_nxt = __node;
1942 
1943  if (__node->_M_nxt)
1944  // We must update the former begin bucket, which is pointing
1945  // to _M_before_begin.
1946  _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
1947 
1948  _M_buckets[__bkt] = &_M_before_begin;
1949  }
1950  }
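The key invariant here is that _M_buckets[bkt] stores a pointer to the node preceding the bucket's first element, with _M_before_begin acting as the sentinel before the global singly-linked list. The toy sketch below mirrors that logic with simplified stand-in types; Node and TinyTable are illustrative names, not the library's:

#include <cstddef>
#include <vector>

struct Node { Node* next; int value; };

struct TinyTable
{
  Node before_begin;            // sentinel preceding the whole list
  std::vector<Node*> buckets;   // each entry: node preceding the bucket

  explicit TinyTable(std::size_t n)
  : before_begin{nullptr, 0}, buckets(n, nullptr) { }

  std::size_t bucket_of(int v) const
  { return static_cast<std::size_t>(v) % buckets.size(); }

  void insert_bucket_begin(std::size_t bkt, Node* node)
  {
    if (buckets[bkt])
      {
        // Bucket not empty: link right after its preceding node.
        node->next = buckets[bkt]->next;
        buckets[bkt]->next = node;
      }
    else
      {
        // Bucket empty: push at the front of the global list.
        node->next = before_begin.next;
        before_begin.next = node;
        if (node->next)
          // The bucket owning the old first node is now preceded by
          // the new node.
          buckets[bucket_of(node->next->value)] = node;
        buckets[bkt] = &before_begin;
      }
  }
};

int main()
{
  TinyTable t(4);
  Node a{nullptr, 5}, b{nullptr, 6};
  t.insert_bucket_begin(t.bucket_of(a.value), &a);   // goes to bucket 1
  t.insert_bucket_begin(t.bucket_of(b.value), &b);   // goes to bucket 2
}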
1951 
1952  template<typename _Key, typename _Value, typename _Alloc,
1953  typename _ExtractKey, typename _Equal,
1954  typename _Hash, typename _RangeHash, typename _Unused,
1955  typename _RehashPolicy, typename _Traits>
1956  void
1957  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1958  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1959  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
1960  size_type __next_bkt)
1961  {
1962  if (!__next || __next_bkt != __bkt)
1963  {
1964  // The bucket is now empty.
1965  // First, update the next bucket, if any.
1966  if (__next)
1967  _M_buckets[__next_bkt] = _M_buckets[__bkt];
1968 
1969  // Second, update the before-begin node if necessary.
1970  if (&_M_before_begin == _M_buckets[__bkt])
1971  _M_before_begin._M_nxt = __next;
1972  _M_buckets[__bkt] = nullptr;
1973  }
1974  }
1975 
1976  template<typename _Key, typename _Value, typename _Alloc,
1977  typename _ExtractKey, typename _Equal,
1978  typename _Hash, typename _RangeHash, typename _Unused,
1979  typename _RehashPolicy, typename _Traits>
1980  auto
1981  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1982  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1983  _M_get_previous_node(size_type __bkt, __node_ptr __n)
1984  -> __node_base_ptr
1985  {
1986  __node_base_ptr __prev_n = _M_buckets[__bkt];
1987  while (__prev_n->_M_nxt != __n)
1988  __prev_n = __prev_n->_M_nxt;
1989  return __prev_n;
1990  }
1991 
1992  template<typename _Key, typename _Value, typename _Alloc,
1993  typename _ExtractKey, typename _Equal,
1994  typename _Hash, typename _RangeHash, typename _Unused,
1995  typename _RehashPolicy, typename _Traits>
1996  template<typename... _Args>
1997  auto
1998  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1999  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2000  _M_emplace(true_type /* __uks */, _Args&&... __args)
2001  -> pair<iterator, bool>
2002  {
2003  // First build the node to get access to the hash code
2004  _Scoped_node __node { this, std::forward<_Args>(__args)... };
2005  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2006  __hash_code __code = this->_M_hash_code(__k);
2007  size_type __bkt = _M_bucket_index(__code);
2008  if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
2009  // There is already an equivalent node, no insertion
2010  return std::make_pair(iterator(__p), false);
2011 
2012  // Insert the node
2013  auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
2014  __node._M_node = nullptr;
2015  return { __pos, true };
2016  }
2017 
2018  template<typename _Key, typename _Value, typename _Alloc,
2019  typename _ExtractKey, typename _Equal,
2020  typename _Hash, typename _RangeHash, typename _Unused,
2021  typename _RehashPolicy, typename _Traits>
2022  template<typename... _Args>
2023  auto
2024  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2025  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2026  _M_emplace(const_iterator __hint, false_type /* __uks */,
2027  _Args&&... __args)
2028  -> iterator
2029  {
2030  // First build the node to get its hash code.
2031  _Scoped_node __node { this, std::forward<_Args>(__args)... };
2032  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2033 
2034  __hash_code __code = this->_M_hash_code(__k);
2035  auto __pos
2036  = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
2037  __node._M_node = nullptr;
2038  return __pos;
2039  }
2040 
2041  template<typename _Key, typename _Value, typename _Alloc,
2042  typename _ExtractKey, typename _Equal,
2043  typename _Hash, typename _RangeHash, typename _Unused,
2044  typename _RehashPolicy, typename _Traits>
2045  auto
2046  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2047  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2048  _M_insert_unique_node(size_type __bkt, __hash_code __code,
2049  __node_ptr __node, size_type __n_elt)
2050  -> iterator
2051  {
2052  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2053  std::pair<bool, std::size_t> __do_rehash
2054  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2055  __n_elt);
2056 
2057  if (__do_rehash.first)
2058  {
2059  _M_rehash(__do_rehash.second, __saved_state);
2060  __bkt = _M_bucket_index(__code);
2061  }
2062 
2063  this->_M_store_code(*__node, __code);
2064 
2065  // Always insert at the beginning of the bucket.
2066  _M_insert_bucket_begin(__bkt, __node);
2067  ++_M_element_count;
2068  return iterator(__node);
2069  }
2070 
2071  template<typename _Key, typename _Value, typename _Alloc,
2072  typename _ExtractKey, typename _Equal,
2073  typename _Hash, typename _RangeHash, typename _Unused,
2074  typename _RehashPolicy, typename _Traits>
2075  auto
2076  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2077  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2078  _M_insert_multi_node(__node_ptr __hint,
2079  __hash_code __code, __node_ptr __node)
2080  -> iterator
2081  {
2082  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2083  std::pair<bool, std::size_t> __do_rehash
2084  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2085 
2086  if (__do_rehash.first)
2087  _M_rehash(__do_rehash.second, __saved_state);
2088 
2089  this->_M_store_code(*__node, __code);
2090  const key_type& __k = _ExtractKey{}(__node->_M_v());
2091  size_type __bkt = _M_bucket_index(__code);
2092 
2093  // Find the node before an equivalent one, or use the hint if it
2094  // exists and is equivalent.
2095  __node_base_ptr __prev
2096  = __builtin_expect(__hint != nullptr, false)
2097  && this->_M_equals(__k, __code, *__hint)
2098  ? __hint
2099  : _M_find_before_node(__bkt, __k, __code);
2100 
2101  if (__prev)
2102  {
2103  // Insert after the node before the equivalent one.
2104  __node->_M_nxt = __prev->_M_nxt;
2105  __prev->_M_nxt = __node;
2106  if (__builtin_expect(__prev == __hint, false))
2107  // The hint might be the last node of its bucket; in that case we
2108  // need to update the next bucket.
2109  if (__node->_M_nxt
2110  && !this->_M_equals(__k, __code, *__node->_M_next()))
2111  {
2112  size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2113  if (__next_bkt != __bkt)
2114  _M_buckets[__next_bkt] = __node;
2115  }
2116  }
2117  else
2118  // The inserted node has no equivalent in the hashtable. We must
2119  // insert the new node at the beginning of the bucket to preserve
2120  // equivalent elements' relative positions.
2121  _M_insert_bucket_begin(__bkt, __node);
2122  ++_M_element_count;
2123  return iterator(__node);
2124  }
2125 
2126  // Insert v if no element with its key is already present.
2127  template<typename _Key, typename _Value, typename _Alloc,
2128  typename _ExtractKey, typename _Equal,
2129  typename _Hash, typename _RangeHash, typename _Unused,
2130  typename _RehashPolicy, typename _Traits>
2131  template<typename _Kt, typename _Arg, typename _NodeGenerator>
2132  auto
2133  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2134  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2135  _M_insert_unique(_Kt&& __k, _Arg&& __v,
2136  const _NodeGenerator& __node_gen)
2137  -> pair<iterator, bool>
2138  {
2139  __hash_code __code = this->_M_hash_code_tr(__k);
2140  size_type __bkt = _M_bucket_index(__code);
2141 
2142  if (__node_ptr __node = _M_find_node_tr(__bkt, __k, __code))
2143  return { iterator(__node), false };
2144 
2145  _Scoped_node __node {
2146  __node_builder_t::_S_build(std::forward<_Kt>(__k),
2147  std::forward<_Arg>(__v),
2148  __node_gen),
2149  this
2150  };
2151  auto __pos
2152  = _M_insert_unique_node(__bkt, __code, __node._M_node);
2153  __node._M_node = nullptr;
2154  return { __pos, true };
2155  }
2156 
2157  // Insert v unconditionally.
2158  template<typename _Key, typename _Value, typename _Alloc,
2159  typename _ExtractKey, typename _Equal,
2160  typename _Hash, typename _RangeHash, typename _Unused,
2161  typename _RehashPolicy, typename _Traits>
2162  template<typename _Arg, typename _NodeGenerator>
2163  auto
2164  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2165  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2166  _M_insert(const_iterator __hint, _Arg&& __v,
2167  const _NodeGenerator& __node_gen,
2168  false_type /* __uks */)
2169  -> iterator
2170  {
2171  // First allocate the new node so that nothing is modified if it throws.
2172  _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
2173 
2174  // Second, compute the hash code so that we don't rehash if it throws.
2175  __hash_code __code
2176  = this->_M_hash_code(_ExtractKey{}(__node._M_node->_M_v()));
2177 
2178  auto __pos
2179  = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
2180  __node._M_node = nullptr;
2181  return __pos;
2182  }
2183 
2184  template<typename _Key, typename _Value, typename _Alloc,
2185  typename _ExtractKey, typename _Equal,
2186  typename _Hash, typename _RangeHash, typename _Unused,
2187  typename _RehashPolicy, typename _Traits>
2188  auto
2189  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2190  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2191  erase(const_iterator __it)
2192  -> iterator
2193  {
2194  __node_ptr __n = __it._M_cur;
2195  std::size_t __bkt = _M_bucket_index(*__n);
2196 
2197  // Look for the previous node to unlink it from the erased one;
2198  // this is why buckets need to contain the before-begin node, so
2199  // that this search is fast.
2200  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2201  return _M_erase(__bkt, __prev_n, __n);
2202  }
2203 
2204  template<typename _Key, typename _Value, typename _Alloc,
2205  typename _ExtractKey, typename _Equal,
2206  typename _Hash, typename _RangeHash, typename _Unused,
2207  typename _RehashPolicy, typename _Traits>
2208  auto
2209  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2210  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2211  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
2212  -> iterator
2213  {
2214  if (__prev_n == _M_buckets[__bkt])
2215  _M_remove_bucket_begin(__bkt, __n->_M_next(),
2216  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
2217  else if (__n->_M_nxt)
2218  {
2219  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
2220  if (__next_bkt != __bkt)
2221  _M_buckets[__next_bkt] = __prev_n;
2222  }
2223 
2224  __prev_n->_M_nxt = __n->_M_nxt;
2225  iterator __result(__n->_M_next());
2226  this->_M_deallocate_node(__n);
2227  --_M_element_count;
2228 
2229  return __result;
2230  }
2231 
2232  template<typename _Key, typename _Value, typename _Alloc,
2233  typename _ExtractKey, typename _Equal,
2234  typename _Hash, typename _RangeHash, typename _Unused,
2235  typename _RehashPolicy, typename _Traits>
2236  auto
2237  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2238  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2239  _M_erase(true_type /* __uks */, const key_type& __k)
2240  -> size_type
2241  {
2242  __hash_code __code = this->_M_hash_code(__k);
2243  std::size_t __bkt = _M_bucket_index(__code);
2244 
2245  // Look for the node before the first matching node.
2246  __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
2247  if (!__prev_n)
2248  return 0;
2249 
2250  // We found a matching node; erase it.
2251  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2252  _M_erase(__bkt, __prev_n, __n);
2253  return 1;
2254  }
2255 
2256  template<typename _Key, typename _Value, typename _Alloc,
2257  typename _ExtractKey, typename _Equal,
2258  typename _Hash, typename _RangeHash, typename _Unused,
2259  typename _RehashPolicy, typename _Traits>
2260  auto
2261  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2262  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2263  _M_erase(false_type /* __uks */, const key_type& __k)
2264  -> size_type
2265  {
2266  __hash_code __code = this->_M_hash_code(__k);
2267  std::size_t __bkt = _M_bucket_index(__code);
2268 
2269  // Look for the node before the first matching node.
2270  __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
2271  if (!__prev_n)
2272  return 0;
2273 
2274  // _GLIBCXX_RESOLVE_LIB_DEFECTS
2275  // 526. Is it undefined if a function in the standard changes
2276  // in parameters?
2277  // We use one loop to find all matching nodes and another to deallocate
2278  // them so that the key stays valid during the first loop. It might be
2279  // invalidated indirectly when destroying nodes.
2280  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2281  __node_ptr __n_last = __n->_M_next();
2282  while (__n_last && this->_M_node_equals(*__n, *__n_last))
2283  __n_last = __n_last->_M_next();
2284 
2285  std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
2286 
2287  // Deallocate nodes.
2288  size_type __result = 0;
2289  do
2290  {
2291  __node_ptr __p = __n->_M_next();
2292  this->_M_deallocate_node(__n);
2293  __n = __p;
2294  ++__result;
2295  }
2296  while (__n != __n_last);
2297 
2298  _M_element_count -= __result;
2299  if (__prev_n == _M_buckets[__bkt])
2300  _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
2301  else if (__n_last_bkt != __bkt)
2302  _M_buckets[__n_last_bkt] = __prev_n;
2303  __prev_n->_M_nxt = __n_last;
2304  return __result;
2305  }
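At the public-API level this is erase(key) on a multi container, which removes the whole group of equivalent elements and returns how many were erased:

#include <cassert>
#include <unordered_set>

int main()
{
  std::unordered_multiset<int> ms{3, 3, 3, 5};

  auto n = ms.erase(3);   // erases the whole group of equivalent keys
  assert(n == 3);
  assert(ms.size() == 1 && ms.count(5) == 1);
}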
2306 
2307  template<typename _Key, typename _Value, typename _Alloc,
2308  typename _ExtractKey, typename _Equal,
2309  typename _Hash, typename _RangeHash, typename _Unused,
2310  typename _RehashPolicy, typename _Traits>
2311  auto
2312  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2313  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2314  erase(const_iterator __first, const_iterator __last)
2315  -> iterator
2316  {
2317  __node_ptr __n = __first._M_cur;
2318  __node_ptr __last_n = __last._M_cur;
2319  if (__n == __last_n)
2320  return iterator(__n);
2321 
2322  std::size_t __bkt = _M_bucket_index(*__n);
2323 
2324  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2325  bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
2326  std::size_t __n_bkt = __bkt;
2327  for (;;)
2328  {
2329  do
2330  {
2331  __node_ptr __tmp = __n;
2332  __n = __n->_M_next();
2333  this->_M_deallocate_node(__tmp);
2334  --_M_element_count;
2335  if (!__n)
2336  break;
2337  __n_bkt = _M_bucket_index(*__n);
2338  }
2339  while (__n != __last_n && __n_bkt == __bkt);
2340  if (__is_bucket_begin)
2341  _M_remove_bucket_begin(__bkt, __n, __n_bkt);
2342  if (__n == __last_n)
2343  break;
2344  __is_bucket_begin = true;
2345  __bkt = __n_bkt;
2346  }
2347 
2348  if (__n && (__n_bkt != __bkt || __is_bucket_begin))
2349  _M_buckets[__n_bkt] = __prev_n;
2350  __prev_n->_M_nxt = __n;
2351  return iterator(__n);
2352  }
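Range erase unlinks the whole [first, last) sequence and returns an iterator to the element designated by __last; for instance, erasing everything:

#include <cassert>
#include <unordered_map>

int main()
{
  std::unordered_map<int, int> m{{1, 10}, {2, 20}, {3, 30}};

  // Erasing [begin, end) empties the container and returns end().
  auto it = m.erase(m.cbegin(), m.cend());
  assert(it == m.end());
  assert(m.empty());
}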
2353 
2354  template<typename _Key, typename _Value, typename _Alloc,
2355  typename _ExtractKey, typename _Equal,
2356  typename _Hash, typename _RangeHash, typename _Unused,
2357  typename _RehashPolicy, typename _Traits>
2358  void
2359  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2360  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2361  clear() noexcept
2362  {
2363  this->_M_deallocate_nodes(_M_begin());
2364  __builtin_memset(_M_buckets, 0,
2365  _M_bucket_count * sizeof(__node_base_ptr));
2366  _M_element_count = 0;
2367  _M_before_begin._M_nxt = nullptr;
2368  }
2369 
2370  template<typename _Key, typename _Value, typename _Alloc,
2371  typename _ExtractKey, typename _Equal,
2372  typename _Hash, typename _RangeHash, typename _Unused,
2373  typename _RehashPolicy, typename _Traits>
2374  void
2375  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2376  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2377  rehash(size_type __bkt_count)
2378  {
2379  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2380  __bkt_count
2381  = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2382  __bkt_count);
2383  __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2384 
2385  if (__bkt_count != _M_bucket_count)
2386  _M_rehash(__bkt_count, __saved_state);
2387  else
2388  // No rehash; restore the previous state to keep it consistent
2389  // with the container state.
2390  _M_rehash_policy._M_reset(__saved_state);
2391  }
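As the max() above shows, a rehash request is clamped so that the new bucket count also accommodates the current number of elements under the load-factor policy. Through the public API:

#include <cassert>
#include <unordered_set>

int main()
{
  std::unordered_set<int> s{1, 2, 3, 4, 5};

  s.rehash(100);                 // request at least 100 buckets
  assert(s.bucket_count() >= 100);

  // A request smaller than the load factor allows is clamped: the
  // bucket count never drops below size() / max_load_factor().
  s.rehash(0);
  assert(s.bucket_count() >= s.size() / s.max_load_factor());
}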
2392 
2393  template<typename _Key, typename _Value, typename _Alloc,
2394  typename _ExtractKey, typename _Equal,
2395  typename _Hash, typename _RangeHash, typename _Unused,
2396  typename _RehashPolicy, typename _Traits>
2397  void
2398  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2399  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2400  _M_rehash(size_type __bkt_count, const __rehash_state& __state)
2401  {
2402  __try
2403  {
2404  _M_rehash_aux(__bkt_count, __unique_keys{});
2405  }
2406  __catch(...)
2407  {
2408  // A failure here means that bucket allocation failed. We only
2409  // have to restore the hash policy's previous state.
2410  _M_rehash_policy._M_reset(__state);
2411  __throw_exception_again;
2412  }
2413  }
2414 
2415  // Rehash when there are no equivalent elements.
2416  template<typename _Key, typename _Value, typename _Alloc,
2417  typename _ExtractKey, typename _Equal,
2418  typename _Hash, typename _RangeHash, typename _Unused,
2419  typename _RehashPolicy, typename _Traits>
2420  void
2421  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2422  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2423  _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
2424  {
2425  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2426  __node_ptr __p = _M_begin();
2427  _M_before_begin._M_nxt = nullptr;
2428  std::size_t __bbegin_bkt = 0;
2429  while (__p)
2430  {
2431  __node_ptr __next = __p->_M_next();
2432  std::size_t __bkt
2433  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2434  if (!__new_buckets[__bkt])
2435  {
2436  __p->_M_nxt = _M_before_begin._M_nxt;
2437  _M_before_begin._M_nxt = __p;
2438  __new_buckets[__bkt] = &_M_before_begin;
2439  if (__p->_M_nxt)
2440  __new_buckets[__bbegin_bkt] = __p;
2441  __bbegin_bkt = __bkt;
2442  }
2443  else
2444  {
2445  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2446  __new_buckets[__bkt]->_M_nxt = __p;
2447  }
2448 
2449  __p = __next;
2450  }
2451 
2452  _M_deallocate_buckets();
2453  _M_bucket_count = __bkt_count;
2454  _M_buckets = __new_buckets;
2455  }
2456 
2457  // Rehash when there can be equivalent elements, preserving their
2458  // relative order.
2459  template<typename _Key, typename _Value, typename _Alloc,
2460  typename _ExtractKey, typename _Equal,
2461  typename _Hash, typename _RangeHash, typename _Unused,
2462  typename _RehashPolicy, typename _Traits>
2463  void
2464  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2465  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2466  _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
2467  {
2468  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2469  __node_ptr __p = _M_begin();
2470  _M_before_begin._M_nxt = nullptr;
2471  std::size_t __bbegin_bkt = 0;
2472  std::size_t __prev_bkt = 0;
2473  __node_ptr __prev_p = nullptr;
2474  bool __check_bucket = false;
2475 
2476  while (__p)
2477  {
2478  __node_ptr __next = __p->_M_next();
2479  std::size_t __bkt
2480  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2481 
2482  if (__prev_p && __prev_bkt == __bkt)
2483  {
2484  // The previous insert was already in this bucket; we insert after
2485  // the previously inserted node to preserve equivalent elements'
2486  // relative order.
2487  __p->_M_nxt = __prev_p->_M_nxt;
2488  __prev_p->_M_nxt = __p;
2489 
2490  // Inserting after a node in a bucket requires checking that we
2491  // haven't changed the bucket's last node; in that case the next
2492  // bucket, which contains its before-begin node, must be updated.
2493  // We schedule a check as soon as we move out of the sequence of
2494  // equivalent nodes, to limit the number of checks.
2495  __check_bucket = true;
2496  }
2497  else
2498  {
2499  if (__check_bucket)
2500  {
2501  // Check whether we need to update the next bucket because of
2502  // insertions into the __prev_bkt bucket.
2503  if (__prev_p->_M_nxt)
2504  {
2505  std::size_t __next_bkt
2506  = __hash_code_base::_M_bucket_index(
2507  *__prev_p->_M_next(), __bkt_count);
2508  if (__next_bkt != __prev_bkt)
2509  __new_buckets[__next_bkt] = __prev_p;
2510  }
2511  __check_bucket = false;
2512  }
2513 
2514  if (!__new_buckets[__bkt])
2515  {
2516  __p->_M_nxt = _M_before_begin._M_nxt;
2517  _M_before_begin._M_nxt = __p;
2518  __new_buckets[__bkt] = &_M_before_begin;
2519  if (__p->_M_nxt)
2520  __new_buckets[__bbegin_bkt] = __p;
2521  __bbegin_bkt = __bkt;
2522  }
2523  else
2524  {
2525  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2526  __new_buckets[__bkt]->_M_nxt = __p;
2527  }
2528  }
2529  __prev_p = __p;
2530  __prev_bkt = __bkt;
2531  __p = __next;
2532  }
2533 
2534  if (__check_bucket && __prev_p->_M_nxt)
2535  {
2536  std::size_t __next_bkt
2537  = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
2538  __bkt_count);
2539  if (__next_bkt != __prev_bkt)
2540  __new_buckets[__next_bkt] = __prev_p;
2541  }
2542 
2543  _M_deallocate_buckets();
2544  _M_bucket_count = __bkt_count;
2545  _M_buckets = __new_buckets;
2546  }
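The __prev_p / __check_bucket bookkeeping implements the guarantee that rehashing preserves the relative order of equivalent elements in the multi containers, which is observable from the outside:

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

int main()
{
  std::unordered_multimap<int, std::string> mm;
  mm.emplace(1, "first");
  mm.emplace(1, "second");
  mm.emplace(1, "third");

  // Record the order of the key-1 group, force a rehash, record it again.
  std::vector<std::string> before, after;
  auto r = mm.equal_range(1);
  for (auto it = r.first; it != r.second; ++it)
    before.push_back(it->second);

  mm.rehash(1024);

  r = mm.equal_range(1);
  for (auto it = r.first; it != r.second; ++it)
    after.push_back(it->second);

  assert(before == after);   // relative order of equivalent elements kept
}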
2547 
2548 #if __cplusplus > 201402L
2549  template<typename, typename, typename> class _Hash_merge_helper { };
2550 #endif // C++17
2551 
2552 #if __cpp_deduction_guides >= 201606
2553  // Used to constrain deduction guides
2554  template<typename _Hash>
2555  using _RequireNotAllocatorOrIntegral
2556  = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2557 #endif
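_RequireNotAllocatorOrIntegral prevents a bucket-count or allocator argument from being deduced as the hash functor by the containers' deduction guides. For example, with class template argument deduction (C++17):

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

int main()
{
  std::vector<std::pair<int, std::string>> v{{1, "one"}, {2, "two"}};

  // The third argument is a bucket count; the constraint keeps it from
  // being treated as a hasher.  Deduces std::unordered_map<int, std::string>.
  std::unordered_map m(v.begin(), v.end(), 10);
  (void)m;
}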
2558 
2559 /// @endcond
2560 _GLIBCXX_END_NAMESPACE_VERSION
2561 } // namespace std
2562 
2563 #endif // _HASHTABLE_H