// robin_hood.h — third-party single-header hash map, vendored into the
// Nav2 (ROS 2 navigation stack) smac_planner package.
// Copyright (c) 2018-2021 Martin Ankerl <http://martin.ankerl.com>
//                 ______  _____ ______  _________
//  ______________ ___  /_ ___(_)_______   ___  /_ ______   ______  ______  /
//  __  ___/_  __ \__  __ \__  / __  __ \  __  __ \_  __ \ _  __ \_  __  /
//  _  /    / /_/ /_  /_/ /_  /  _  / / /  _  / / // /_/ / / /_/ // /_/ /
//  /_/     \____/ /_.___/ /_/   /_/ /_/   /_/ /_/ \____/  \____/ \__,_/
//                                              _/_____/
//
// Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20
// https://github.com/martinus/robin-hood-hashing
//
// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2021 Martin Ankerl <http://martin.ankerl.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
33 
34 #ifndef NAV2_SMAC_PLANNER__THIRDPARTY__ROBIN_HOOD_H_
35 #define NAV2_SMAC_PLANNER__THIRDPARTY__ROBIN_HOOD_H_
36 
37 // see https://semver.org/
38 #define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes
39 #define ROBIN_HOOD_VERSION_MINOR 11 // for adding functionality in a backwards-compatible manner
40 #define ROBIN_HOOD_VERSION_PATCH 5 // for backwards-compatible bug fixes
41 
42 #include <algorithm>
43 #include <cstdlib>
44 #include <cstring>
45 #include <functional>
46 #include <limits>
47 #include <memory> // only to support hash of smart pointers
48 #include <stdexcept>
49 #include <string>
50 #include <type_traits>
51 #include <tuple>
52 #include <utility>
53 #if __cplusplus >= 201703L
54 # include <string_view>
55 #endif
56 /* *INDENT-OFF* */
57 
// Optional diagnostics. Each facility is compiled out entirely (empty macro)
// unless its *_ENABLED define is uncommented, so release builds pay nothing.

// #define ROBIN_HOOD_LOG_ENABLED
#ifdef ROBIN_HOOD_LOG_ENABLED
#    include <iostream>
#    define ROBIN_HOOD_LOG(...) \
        std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl;
#else
#    define ROBIN_HOOD_LOG(x)
#endif

// #define ROBIN_HOOD_TRACE_ENABLED
#ifdef ROBIN_HOOD_TRACE_ENABLED
#    include <iostream>
#    define ROBIN_HOOD_TRACE(...) \
        std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl;
#else
#    define ROBIN_HOOD_TRACE(x)
#endif

// #define ROBIN_HOOD_COUNT_ENABLED
#ifdef ROBIN_HOOD_COUNT_ENABLED
#    include <iostream>
#    define ROBIN_HOOD_COUNT(x) ++counts().x;
namespace robin_hood {
// Global event counters, printed via operator<< for quick profiling.
struct Counts {
    uint64_t shiftUp{};
    uint64_t shiftDown{};
};
inline std::ostream& operator<<(std::ostream& os, Counts const& c) {
    return os << c.shiftUp << " shiftUp" << std::endl << c.shiftDown << " shiftDown" << std::endl;
}

static Counts& counts() {
    static Counts counts{};
    return counts;
}
} // namespace robin_hood
#else
#    define ROBIN_HOOD_COUNT(x)
#endif
97 
// all non-argument macros should use this facility. See
// https://www.fluentcpp.com/2019/05/28/better-macros-better-flags/
#define ROBIN_HOOD(x) ROBIN_HOOD_PRIVATE_DEFINITION_##x()

// mark unused members with this macro
#define ROBIN_HOOD_UNUSED(identifier)

// bitness
#if SIZE_MAX == UINT32_MAX
#    define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 32
#elif SIZE_MAX == UINT64_MAX
#    define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 64
#else
#    error Unsupported bitness
#endif

// endianness
#ifdef _MSC_VER
#    define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() 1
#    define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() 0
#else
#    define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() \
        (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#endif

// inline
#ifdef _MSC_VER
#    define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __declspec(noinline)
#else
#    define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __attribute__((noinline))
#endif

// exceptions
#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND)
#    define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 0
#else
#    define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 1
#endif

// count leading/trailing bits
#if !defined(ROBIN_HOOD_DISABLE_INTRINSICS)
#    ifdef _MSC_VER
#        if ROBIN_HOOD(BITNESS) == 32
#            define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward
#        else
#            define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64
#        endif
#        include <intrin.h>
#        pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD))
// NOTE: the NOLINT marker must be a /* */ comment here — a // comment after the
// line-continuation backslash would break the macro definition.
#        define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x)                                       \
            [](size_t mask) noexcept -> int {                                             \
                unsigned long index; /* NOLINT */                                         \
                return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast<int>(index) \
                                                                : ROBIN_HOOD(BITNESS);    \
            }(x)
#    else
#        if ROBIN_HOOD(BITNESS) == 32
#            define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl
#            define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl
#        else
#            define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll
#            define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll
#        endif
#        define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS))
#        define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS))
#    endif
#endif

// fallthrough
#ifndef __has_cpp_attribute // For backwards compatibility
#    define __has_cpp_attribute(x) 0
#endif
#if __has_cpp_attribute(clang::fallthrough)
#    define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#    define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[gnu::fallthrough]]
#else
#    define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH()
#endif

// likely/unlikely
#ifdef _MSC_VER
#    define ROBIN_HOOD_LIKELY(condition) condition
#    define ROBIN_HOOD_UNLIKELY(condition) condition
#else
#    define ROBIN_HOOD_LIKELY(condition) __builtin_expect(condition, 1)
#    define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0)
#endif

// detect if native wchar_t type is available in MSVC
#ifdef _MSC_VER
#    ifdef _NATIVE_WCHAR_T_DEFINED
#        define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
#    else
#        define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 0
#    endif
#else
#    define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
#endif

// detect if MSVC supports the pair(std::piecewise_construct_t,...) constructor being constexpr
#ifdef _MSC_VER
#    if _MSC_VER <= 1900
#        define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 1
#    else
#        define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0
#    endif
#else
#    define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0
#endif

// workaround missing "is_trivially_copyable" in g++ < 5.0
// See https://stackoverflow.com/a/31798726/48181
#if defined(__GNUC__) && __GNUC__ < 5
#    define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
#else
#    define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
#endif

// helpers for C++ versions, see https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html
#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX() __cplusplus
#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX98() 199711L
#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX11() 201103L
#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX14() 201402L
#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX17() 201703L

#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17)
#    define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() [[nodiscard]]
#else
#    define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD()
#endif
230 
231 namespace robin_hood {
232 
233 #if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
234 # define ROBIN_HOOD_STD std
235 #else
236 
237 // c++11 compatibility layer
238 namespace ROBIN_HOOD_STD {
239 template <class T>
241  : std::integral_constant<std::size_t, alignof(typename std::remove_all_extents<T>::type)> {};
242 
243 template <class T, T... Ints>
245 public:
246  using value_type = T;
247  static_assert(std::is_integral<value_type>::value, "not integral type");
248  static constexpr std::size_t size() noexcept {
249  return sizeof...(Ints);
250  }
251 };
252 template <std::size_t... Inds>
253 using index_sequence = integer_sequence<std::size_t, Inds...>;
254 
255 namespace detail_ {
256 template <class T, T Begin, T End, bool>
257 struct IntSeqImpl {
258  using TValue = T;
259  static_assert(std::is_integral<TValue>::value, "not integral type");
260  static_assert(Begin >= 0 && Begin < End, "unexpected argument (Begin<0 || Begin<=End)");
261 
262  template <class, class>
264 
265  template <TValue... Inds0, TValue... Inds1>
266  struct IntSeqCombiner<integer_sequence<TValue, Inds0...>, integer_sequence<TValue, Inds1...>> {
267  using TResult = integer_sequence<TValue, Inds0..., Inds1...>;
268  };
269 
270  using TResult =
271  typename IntSeqCombiner<typename IntSeqImpl<TValue, Begin, Begin + (End - Begin) / 2,
272  (End - Begin) / 2 == 1>::TResult,
273  typename IntSeqImpl<TValue, Begin + (End - Begin) / 2, End,
274  (End - Begin + 1) / 2 == 1>::TResult>::TResult;
275 };
276 
277 template <class T, T Begin>
278 struct IntSeqImpl<T, Begin, Begin, false> {
279  using TValue = T;
280  static_assert(std::is_integral<TValue>::value, "not integral type");
281  static_assert(Begin >= 0, "unexpected argument (Begin<0)");
283 };
284 
285 template <class T, T Begin, T End>
286 struct IntSeqImpl<T, Begin, End, true> {
287  using TValue = T;
288  static_assert(std::is_integral<TValue>::value, "not integral type");
289  static_assert(Begin >= 0, "unexpected argument (Begin<0)");
291 };
292 } // namespace detail_
293 
294 template <class T, T N>
295 using make_integer_sequence = typename detail_::IntSeqImpl<T, 0, N, (N - 0) == 1>::TResult;
296 
297 template <std::size_t N>
298 using make_index_sequence = make_integer_sequence<std::size_t, N>;
299 
300 template <class... T>
301 using index_sequence_for = make_index_sequence<sizeof...(T)>;
302 
303 } // namespace ROBIN_HOOD_STD
304 
305 #endif
306 
307 namespace detail {
308 
309 // make sure we static_cast to the correct type for hash_int
310 #if ROBIN_HOOD(BITNESS) == 64
311 using SizeT = uint64_t;
312 #else
313 using SizeT = uint32_t;
314 #endif
315 
// Rotates x right by k bits. Callers must pass 0 < k < 8 * sizeof(T);
// k == 0 (or k == bit-width) would shift by the full width, which is UB.
template <typename T>
T rotr(T x, unsigned k) {
    return (x >> k) | (x << (8U * sizeof(T) - k));
}
320 
// This cast gets rid of warnings like "cast from 'uint8_t*' {aka 'unsigned char*'} to
// 'uint64_t*' {aka 'long unsigned int*'} increases required alignment of target type". Use with
// care!
template <typename T>
inline T reinterpret_cast_no_cast_align_warning(void* ptr) noexcept {
    return reinterpret_cast<T>(ptr);
}

// const overload, same purpose.
template <typename T>
inline T reinterpret_cast_no_cast_align_warning(void const* ptr) noexcept {
    return reinterpret_cast<T>(ptr);
}
333 
334 // make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
335 // inlinings more difficult. Throws are also generally the slow path.
336 template <typename E, typename... Args>
337 [[noreturn]] ROBIN_HOOD(NOINLINE)
338 #if ROBIN_HOOD(HAS_EXCEPTIONS)
339  void doThrow(Args&&... args) {
340  throw E(std::forward<Args>(args)...);
341 }
342 #else
343  void doThrow(Args&&... ROBIN_HOOD_UNUSED(args) /*unused*/) {
344  abort();
345 }
346 #endif
347 
348 template <typename E, typename T, typename... Args>
349 T* assertNotNull(T* t, Args&&... args) {
350  if (ROBIN_HOOD_UNLIKELY(nullptr == t)) {
351  doThrow<E>(std::forward<Args>(args)...);
352  }
353  return t;
354 }
355 
// Reads a T from a possibly misaligned address.
template <typename T>
inline T unaligned_load(void const* ptr) noexcept {
    // using memcpy so we don't get into unaligned load problems.
    // compiler should optimize this very well anyways.
    T t;
    std::memcpy(&t, ptr, sizeof(T));
    return t;
}
364 
365 // Allocates bulks of memory for objects of type T. This deallocates the memory in the destructor,
366 // and keeps a linked list of the allocated memory around. Overhead per allocation is the size of a
367 // pointer.
368 template <typename T, size_t MinNumAllocs = 4, size_t MaxNumAllocs = 256>
370 public:
371  BulkPoolAllocator() noexcept = default;
372 
373  // does not copy anything, just creates a new allocator.
374  BulkPoolAllocator(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept
375  : mHead(nullptr)
376  , mListForFree(nullptr) {}
377 
379  : mHead(o.mHead)
380  , mListForFree(o.mListForFree) {
381  o.mListForFree = nullptr;
382  o.mHead = nullptr;
383  }
384 
385  BulkPoolAllocator& operator=(BulkPoolAllocator&& o) noexcept {
386  reset();
387  mHead = o.mHead;
388  mListForFree = o.mListForFree;
389  o.mListForFree = nullptr;
390  o.mHead = nullptr;
391  return *this;
392  }
393 
395  operator=(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept {
396  // does not do anything
397  return *this;
398  }
399 
400  ~BulkPoolAllocator() noexcept {
401  reset();
402  }
403 
404  // Deallocates all allocated memory.
405  void reset() noexcept {
406  while (mListForFree) {
407  T* tmp = *mListForFree;
408  ROBIN_HOOD_LOG("std::free")
409  std::free(mListForFree);
410  mListForFree = reinterpret_cast_no_cast_align_warning<T**>(tmp);
411  }
412  mHead = nullptr;
413  }
414 
415  // allocates, but does NOT initialize. Use in-place new constructor, e.g.
416  // T* obj = pool.allocate();
417  // ::new (static_cast<void*>(obj)) T();
418  T* allocate() {
419  T* tmp = mHead;
420  if (!tmp) {
421  tmp = performAllocation();
422  }
423 
424  mHead = *reinterpret_cast_no_cast_align_warning<T**>(tmp);
425  return tmp;
426  }
427 
428  // does not actually deallocate but puts it in store.
429  // make sure you have already called the destructor! e.g. with
430  // obj->~T();
431  // pool.deallocate(obj);
432  void deallocate(T* obj) noexcept {
433  *reinterpret_cast_no_cast_align_warning<T**>(obj) = mHead;
434  mHead = obj;
435  }
436 
437  // Adds an already allocated block of memory to the allocator. This allocator is from now on
438  // responsible for freeing the data (with free()). If the provided data is not large enough to
439  // make use of, it is immediately freed. Otherwise it is reused and freed in the destructor.
440  void addOrFree(void* ptr, const size_t numBytes) noexcept {
441  // calculate number of available elements in ptr
442  if (numBytes < ALIGNMENT + ALIGNED_SIZE) {
443  // not enough data for at least one element. Free and return.
444  ROBIN_HOOD_LOG("std::free")
445  std::free(ptr);
446  } else {
447  ROBIN_HOOD_LOG("add to buffer")
448  add(ptr, numBytes);
449  }
450  }
451 
452  void swap(BulkPoolAllocator<T, MinNumAllocs, MaxNumAllocs>& other) noexcept {
453  using std::swap;
454  swap(mHead, other.mHead);
455  swap(mListForFree, other.mListForFree);
456  }
457 
458 private:
459  // iterates the list of allocated memory to calculate how many to alloc next.
460  // Recalculating this each time saves us a size_t member.
461  // This ignores the fact that memory blocks might have been added manually with addOrFree. In
462  // practice, this should not matter much.
463  ROBIN_HOOD(NODISCARD) size_t calcNumElementsToAlloc() const noexcept {
464  auto tmp = mListForFree;
465  size_t numAllocs = MinNumAllocs;
466 
467  while (numAllocs * 2 <= MaxNumAllocs && tmp) {
468  auto x = reinterpret_cast<T***>(tmp); // NOLINT
469  tmp = *x;
470  numAllocs *= 2;
471  }
472 
473  return numAllocs;
474  }
475 
476  // WARNING: Underflow if numBytes < ALIGNMENT! This is guarded in addOrFree().
477  void add(void* ptr, const size_t numBytes) noexcept {
478  const size_t numElements = (numBytes - ALIGNMENT) / ALIGNED_SIZE;
479  auto data = reinterpret_cast<T**>(ptr);
480 
481  // link free list
482  auto x = reinterpret_cast<T***>(data);
483  *x = mListForFree;
484  mListForFree = data;
485 
486  // create linked list for newly allocated data
487  auto* const headT =
488  reinterpret_cast_no_cast_align_warning<T*>(reinterpret_cast<char*>(ptr) + ALIGNMENT);
489 
490  auto* const head = reinterpret_cast<char*>(headT);
491 
492  // Visual Studio compiler automatically unrolls this loop, which is pretty cool
493  for (size_t i = 0; i < numElements; ++i) {
494  *reinterpret_cast_no_cast_align_warning<char**>(head + i * ALIGNED_SIZE) =
495  head + (i + 1) * ALIGNED_SIZE;
496  }
497 
498  // last one points to 0
499  *reinterpret_cast_no_cast_align_warning<T**>(head + (numElements - 1) * ALIGNED_SIZE) =
500  mHead;
501  mHead = headT;
502  }
503 
504  // Called when no memory is available (mHead == 0).
505  // Don't inline this slow path.
506  ROBIN_HOOD(NOINLINE) T* performAllocation() {
507  size_t const numElementsToAlloc = calcNumElementsToAlloc();
508 
509  // alloc new memory: [prev |T, T, ... T]
510  size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc;
511  ROBIN_HOOD_LOG("std::malloc " << bytes << " = " << ALIGNMENT << " + " << ALIGNED_SIZE
512  << " * " << numElementsToAlloc)
513  add(assertNotNull<std::bad_alloc>(std::malloc(bytes)), bytes);
514  return mHead;
515  }
516 
517  // enforce byte alignment of the T's
518 #if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
519  static constexpr size_t ALIGNMENT =
520  (std::max)(std::alignment_of<T>::value, std::alignment_of<T*>::value);
521 #else
522  static const size_t ALIGNMENT =
525  : +ROBIN_HOOD_STD::alignment_of<T*>::value; // the + is for walkarround
526 #endif
527 
528  static constexpr size_t ALIGNED_SIZE = ((sizeof(T) - 1) / ALIGNMENT + 1) * ALIGNMENT;
529 
530  static_assert(MinNumAllocs >= 1, "MinNumAllocs");
531  static_assert(MaxNumAllocs >= MinNumAllocs, "MaxNumAllocs");
532  static_assert(ALIGNED_SIZE >= sizeof(T*), "ALIGNED_SIZE");
533  static_assert(0 == (ALIGNED_SIZE % sizeof(T*)), "ALIGNED_SIZE mod");
534  static_assert(ALIGNMENT >= sizeof(T*), "ALIGNMENT");
535 
536  T* mHead{nullptr};
537  T** mListForFree{nullptr};
538 };
539 
540 template <typename T, size_t MinSize, size_t MaxSize, bool IsFlat>
542 
543 // dummy allocator that does nothing
544 template <typename T, size_t MinSize, size_t MaxSize>
545 struct NodeAllocator<T, MinSize, MaxSize, true> {
546  // we are not using the data, so just free it.
547  void addOrFree(void* ptr, size_t ROBIN_HOOD_UNUSED(numBytes)/*unused*/) noexcept {
548  ROBIN_HOOD_LOG("std::free")
549  std::free(ptr);
550  }
551 };
552 
553 template <typename T, size_t MinSize, size_t MaxSize>
554 struct NodeAllocator<T, MinSize, MaxSize, false> : public BulkPoolAllocator<T, MinSize, MaxSize> {};
555 
556 // c++14 doesn't have is_nothrow_swappable, and clang++ 6.0.1 doesn't like it either, so I'm making
557 // my own here.
558 namespace swappable {
559 #if ROBIN_HOOD(CXX) < ROBIN_HOOD(CXX17)
560 using std::swap;
561 template <typename T>
562 struct nothrow {
563  static const bool value = noexcept(swap(std::declval<T&>(), std::declval<T&>()));
564 };
565 #else
566 template <typename T>
567 struct nothrow {
568  static const bool value = std::is_nothrow_swappable<T>::value;
569 };
570 #endif
571 } // namespace swappable
572 
573 } // namespace detail
// Tag type used to select the heterogeneous-lookup (transparent) overloads.
struct is_transparent_tag {};
577 // A custom pair implementation is used in the map because std::pair is not is_trivially_copyable,
578 // which means it would not be allowed to be used in std::memcpy. This struct is copiable, which is
579 // also tested.
580 template <typename T1, typename T2>
581 struct pair {
582  using first_type = T1;
583  using second_type = T2;
584 
585  template <typename U1 = T1, typename U2 = T2,
586  typename = typename std::enable_if<std::is_default_constructible<U1>::value &&
587  std::is_default_constructible<U2>::value>::type>
588  constexpr pair() noexcept(noexcept(U1()) && noexcept(U2()))
589  : first()
590  , second() {}
591 
592  // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
593  explicit constexpr pair(std::pair<T1, T2> const& o) noexcept(
594  noexcept(T1(std::declval<T1 const&>())) && noexcept(T2(std::declval<T2 const&>())))
595  : first(o.first)
596  , second(o.second) {}
597 
598  // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
599  explicit constexpr pair(std::pair<T1, T2>&& o) noexcept(noexcept(
600  T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>()))))
601  : first(std::move(o.first))
602  , second(std::move(o.second)) {}
603 
604  constexpr pair(T1&& a, T2&& b) noexcept(noexcept(
605  T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>()))))
606  : first(std::move(a))
607  , second(std::move(b)) {}
608 
609  template <typename U1, typename U2>
610  constexpr pair(U1&& a, U2&& b) noexcept(noexcept(T1(std::forward<U1>(
611  std::declval<U1&&>()))) && noexcept(T2(std::forward<U2>(std::declval<U2&&>()))))
612  : first(std::forward<U1>(a))
613  , second(std::forward<U2>(b)) {}
614 
615  template <typename... U1, typename... U2>
616  // MSVC 2015 produces error "C2476: ‘constexpr’ constructor does not initialize all members"
617  // if this constructor is constexpr
618 #if !ROBIN_HOOD(BROKEN_CONSTEXPR)
619  constexpr
620 #endif
621  pair(std::piecewise_construct_t /*unused*/, std::tuple<U1...> a,
622  std::tuple<U2...>
623  b) noexcept(noexcept(pair(std::declval<std::tuple<U1...>&>(),
624  std::declval<std::tuple<U2...>&>(),
625  ROBIN_HOOD_STD::index_sequence_for<U1...>(),
626  ROBIN_HOOD_STD::index_sequence_for<U2...>())))
627  : pair(a, b, ROBIN_HOOD_STD::index_sequence_for<U1...>(),
628  ROBIN_HOOD_STD::index_sequence_for<U2...>()) {
629  }
630 
631  // constructor called from the std::piecewise_construct_t ctor
632  template <typename... U1, size_t... I1, typename... U2, size_t... I2>
633  pair(
634  std::tuple<U1...>& a, std::tuple<U2...>& b,
636  ROBIN_HOOD_STD::index_sequence<I2...> /*unused*/) noexcept(
637  noexcept(T1(std::forward<U1>(std::get<I1>(
638  std::declval<std::tuple<
639  U1...>&>()))...)) && noexcept(T2(std::
640  forward<U2>(std::get<I2>(
641  std::declval<std::tuple<U2...>&>()))...)))
642  : first(std::forward<U1>(std::get<I1>(a))...)
643  , second(std::forward<U2>(std::get<I2>(b))...) {
644  // make visual studio compiler happy about warning about unused a & b.
645  // Visual studio's pair implementation disables warning 4100.
646  (void)a;
647  (void)b;
648  }
649 
650  void swap(pair<T1, T2>& o) noexcept((detail::swappable::nothrow<T1>::value) &&
652  using std::swap;
653  swap(first, o.first);
654  swap(second, o.second);
655  }
656 
657  T1 first; // NOLINT
658  T2 second; // NOLINT
659 };
660 
661 template <typename A, typename B>
662 inline void swap(pair<A, B>& a, pair<A, B>& b) noexcept(
663  noexcept(std::declval<pair<A, B>&>().swap(std::declval<pair<A, B>&>()))) {
664  a.swap(b);
665 }
666 
667 template <typename A, typename B>
668 inline constexpr bool operator==(pair<A, B> const& x, pair<A, B> const& y) {
669  return (x.first == y.first) && (x.second == y.second);
670 }
671 template <typename A, typename B>
672 inline constexpr bool operator!=(pair<A, B> const& x, pair<A, B> const& y) {
673  return !(x == y);
674 }
675 template <typename A, typename B>
676 inline constexpr bool operator<(pair<A, B> const& x, pair<A, B> const& y) noexcept(noexcept(
677  std::declval<A const&>() < std::declval<A const&>()) && noexcept(std::declval<B const&>() <
678  std::declval<B const&>())) {
679  return x.first < y.first || (!(y.first < x.first) && x.second < y.second);
680 }
681 template <typename A, typename B>
682 inline constexpr bool operator>(pair<A, B> const& x, pair<A, B> const& y) {
683  return y < x;
684 }
685 template <typename A, typename B>
686 inline constexpr bool operator<=(pair<A, B> const& x, pair<A, B> const& y) {
687  return !(x > y);
688 }
689 template <typename A, typename B>
690 inline constexpr bool operator>=(pair<A, B> const& x, pair<A, B> const& y) {
691  return !(x < y);
692 }
693 
694 inline size_t hash_bytes(void const* ptr, size_t len) noexcept {
695  static constexpr uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
696  static constexpr uint64_t seed = UINT64_C(0xe17a1465);
697  static constexpr unsigned int r = 47;
698 
699  auto const* const data64 = static_cast<uint64_t const*>(ptr);
700  uint64_t h = seed ^ (len * m);
701 
702  size_t const n_blocks = len / 8;
703  for (size_t i = 0; i < n_blocks; ++i) {
704  auto k = detail::unaligned_load<uint64_t>(data64 + i);
705 
706  k *= m;
707  k ^= k >> r;
708  k *= m;
709 
710  h ^= k;
711  h *= m;
712  }
713 
714  auto const* const data8 = reinterpret_cast<uint8_t const*>(data64 + n_blocks);
715  switch (len & 7U) {
716  case 7:
717  h ^= static_cast<uint64_t>(data8[6]) << 48U;
718  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
719  case 6:
720  h ^= static_cast<uint64_t>(data8[5]) << 40U;
721  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
722  case 5:
723  h ^= static_cast<uint64_t>(data8[4]) << 32U;
724  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
725  case 4:
726  h ^= static_cast<uint64_t>(data8[3]) << 24U;
727  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
728  case 3:
729  h ^= static_cast<uint64_t>(data8[2]) << 16U;
730  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
731  case 2:
732  h ^= static_cast<uint64_t>(data8[1]) << 8U;
733  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
734  case 1:
735  h ^= static_cast<uint64_t>(data8[0]);
736  h *= m;
737  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
738  default:
739  break;
740  }
741 
742  h ^= h >> r;
743 
744  // not doing the final step here, because this will be done by keyToIdx anyways
745  // h *= m;
746  // h ^= h >> r;
747  return static_cast<size_t>(h);
748 }
749 
// Hashes a 64-bit integer using the murmurhash3 finalizer (without its last
// multiply/xorshift step — keyToIdx performs the final mixing anyways).
inline size_t hash_int(uint64_t x) noexcept {
    // tried lots of different hashes, let's stick with murmurhash3. It's simple, fast, well tested,
    // and doesn't need any special 128bit operations.
    x ^= x >> 33U;
    x *= UINT64_C(0xff51afd7ed558ccd);
    x ^= x >> 33U;

    // not doing the final step here, because this will be done by keyToIdx anyways
    // x *= UINT64_C(0xc4ceb9fe1a85ec53);
    // x ^= x >> 33U;
    return static_cast<size_t>(x);
}
762 
763 // A thin wrapper around std::hash, performing an additional simple mixing step of the result.
764 template <typename T, typename Enable = void>
765 struct hash : public std::hash<T> {
766  size_t operator()(T const& obj) const
767  noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>()))) {
768  // call base hash
769  auto result = std::hash<T>::operator()(obj);
770  // return mixed of that, to be save against identity has
771  return hash_int(static_cast<detail::SizeT>(result));
772  }
773 };
774 
775 template <typename CharT>
776 struct hash<std::basic_string<CharT>> {
777  size_t operator()(std::basic_string<CharT> const& str) const noexcept {
778  return hash_bytes(str.data(), sizeof(CharT) * str.size());
779  }
780 };
781 
782 #if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17)
783 template <typename CharT>
784 struct hash<std::basic_string_view<CharT>> {
785  size_t operator()(std::basic_string_view<CharT> const& sv) const noexcept {
786  return hash_bytes(sv.data(), sizeof(CharT) * sv.size());
787  }
788 };
789 #endif
790 
791 template <class T>
792 struct hash<T*> {
793  size_t operator()(T* ptr) const noexcept {
794  return hash_int(reinterpret_cast<detail::SizeT>(ptr));
795  }
796 };
797 
798 template <class T>
799 struct hash<std::unique_ptr<T>> {
800  size_t operator()(std::unique_ptr<T> const& ptr) const noexcept {
801  return hash_int(reinterpret_cast<detail::SizeT>(ptr.get()));
802  }
803 };
804 
805 template <class T>
806 struct hash<std::shared_ptr<T>> {
807  size_t operator()(std::shared_ptr<T> const& ptr) const noexcept {
808  return hash_int(reinterpret_cast<detail::SizeT>(ptr.get()));
809  }
810 };
811 
812 template <typename Enum>
813 struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
814  size_t operator()(Enum e) const noexcept {
815  using Underlying = typename std::underlying_type<Enum>::type;
816  return hash<Underlying>{}(static_cast<Underlying>(e));
817  }
818 };
819 
820 #define ROBIN_HOOD_HASH_INT(T) \
821  template <> \
822  struct hash<T> { \
823  size_t operator()(T const& obj) const noexcept { \
824  return hash_int(static_cast<uint64_t>(obj)); \
825  } \
826  }
827 
828 #if defined(__GNUC__) && !defined(__clang__)
829 # pragma GCC diagnostic push
830 # pragma GCC diagnostic ignored "-Wuseless-cast"
831 #endif
832 // see https://en.cppreference.com/w/cpp/utility/hash
833 ROBIN_HOOD_HASH_INT(bool);
834 ROBIN_HOOD_HASH_INT(char);
835 ROBIN_HOOD_HASH_INT(signed char);
836 ROBIN_HOOD_HASH_INT(unsigned char);
837 ROBIN_HOOD_HASH_INT(char16_t);
838 ROBIN_HOOD_HASH_INT(char32_t);
839 #if ROBIN_HOOD(HAS_NATIVE_WCHART)
840 ROBIN_HOOD_HASH_INT(wchar_t);
841 #endif
842 ROBIN_HOOD_HASH_INT(short); // NOLINT
843 ROBIN_HOOD_HASH_INT(unsigned short); // NOLINT
844 ROBIN_HOOD_HASH_INT(int);
845 ROBIN_HOOD_HASH_INT(unsigned int);
846 ROBIN_HOOD_HASH_INT(long); // NOLINT
847 ROBIN_HOOD_HASH_INT(long long); // NOLINT
848 ROBIN_HOOD_HASH_INT(unsigned long); // NOLINT
849 ROBIN_HOOD_HASH_INT(unsigned long long); // NOLINT
850 #if defined(__GNUC__) && !defined(__clang__)
851 # pragma GCC diagnostic pop
852 #endif
853 namespace detail {
854 
855 template <typename T>
856 struct void_type {
857  using type = void;
858 };
859 
860 template <typename T, typename = void>
861 struct has_is_transparent : public std::false_type {};
862 
863 template <typename T>
864 struct has_is_transparent<T, typename void_type<typename T::is_transparent>::type>
865  : public std::true_type {};
866 
867 // using wrapper classes for hash and key_equal prevents the diamond problem when the same type
868 // is used. see https://stackoverflow.com/a/28771920/48181
869 template <typename T>
870 struct WrapHash : public T {
871  WrapHash() = default;
872  explicit WrapHash(T const& o) noexcept(noexcept(T(std::declval<T const&>())))
873  : T(o) {}
874 };
875 
876 template <typename T>
877 struct WrapKeyEqual : public T {
878  WrapKeyEqual() = default;
879  explicit WrapKeyEqual(T const& o) noexcept(noexcept(T(std::declval<T const&>())))
880  : T(o) {}
881 };
882 
// A highly optimized hashmap implementation, using the Robin Hood algorithm.
//
// In most cases, this map should be usable as a drop-in replacement for std::unordered_map, but
// be about 2x faster in most cases and require much less allocations.
//
// This implementation uses the following memory layout:
//
// [Node, Node, ... Node | info, info, ... infoSentinel ]
//
// * Node: either a DataNode that directly has the std::pair<key, val> as member,
//   or a DataNode with a pointer to std::pair<key,val>. Which DataNode representation to use
//   depends on how fast the swap() operation is. Heuristically, this is automatically chosen
//   based on sizeof(). there are always 2^n Nodes.
//
// * info: Each Node in the map has a corresponding info byte, so there are 2^n info bytes.
//   Each byte is initialized to 0, meaning the corresponding Node is empty. Set to 1 means the
//   corresponding node contains data. Set to 2 means the corresponding Node is filled, but it
//   actually belongs to the previous position and was pushed out because that place is already
//   taken.
//
// * infoSentinel: Sentinel byte set to 1, so that iterator's ++ can stop at end() without the
//   need for a idx variable.
//
// According to STL, order of templates has effect on throughput. That's why I've moved the
// boolean to the front.
// https://www.reddit.com/r/cpp/comments/ahp6iu/compile_time_binary_size_reductions_and_cs_future/eeguck4/
909 template <bool IsFlat, size_t MaxLoadFactor100, typename Key, typename T, typename Hash,
910  typename KeyEqual>
911 class Table
912  : public WrapHash<Hash>,
913  public WrapKeyEqual<KeyEqual>,
915  typename std::conditional<
916  std::is_void<T>::value, Key,
917  robin_hood::pair<typename std::conditional<IsFlat, Key, Key const>::type, T>>::type,
918  4, 16384, IsFlat> {
919 public:
920  static constexpr bool is_flat = IsFlat;
921  static constexpr bool is_map = !std::is_void<T>::value;
922  static constexpr bool is_set = !is_map;
923  static constexpr bool is_transparent =
925 
926  using key_type = Key;
927  using mapped_type = T;
928  using value_type = typename std::conditional<
929  is_set, Key,
931  using size_type = size_t;
932  using hasher = Hash;
933  using key_equal = KeyEqual;
935 
936 private:
937  static_assert(MaxLoadFactor100 > 10 && MaxLoadFactor100 < 100,
938  "MaxLoadFactor100 needs to be >10 && < 100");
939 
940  using WHash = WrapHash<Hash>;
942 
943  // configuration defaults
944 
945  // make sure we have 8 elements, needed to quickly rehash mInfo
946  static constexpr size_t InitialNumElements = sizeof(uint64_t);
947  static constexpr uint32_t InitialInfoNumBits = 5;
948  static constexpr uint8_t InitialInfoInc = 1U << InitialInfoNumBits;
949  static constexpr size_t InfoMask = InitialInfoInc - 1U;
950  static constexpr uint8_t InitialInfoHashShift = 0;
952 
953  // type needs to be wider than uint8_t.
954  using InfoType = uint32_t;
955 
    // DataNode ////////////////////////////////////////////////////////

    // Primary template for the data node. We have special implementations for small and big
    // objects. For large objects it is assumed that swap() is fairly slow, so we allocate these
    // on the heap so swap merely swaps a pointer.
    // The bool parameter selects the specialization: true = inline storage, false = heap.
    template <typename M, bool>
    class DataNode {};
963 
964  // Small: just allocate on the stack.
965  template <typename M>
966  class DataNode<M, true> final {
967  public:
968  template <typename... Args>
969  explicit DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, Args&&... args) noexcept(
970  noexcept(value_type(std::forward<Args>(args)...)))
971  : mData(std::forward<Args>(args)...) {}
972 
973  DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, true>&& n) noexcept(
974  std::is_nothrow_move_constructible<value_type>::value)
975  : mData(std::move(n.mData)) {}
976 
977  // doesn't do anything
978  void destroy(M& ROBIN_HOOD_UNUSED(map) /*unused*/) noexcept {}
979  void destroyDoNotDeallocate() noexcept {}
980 
981  value_type const* operator->() const noexcept {
982  return &mData;
983  }
984  value_type* operator->() noexcept {
985  return &mData;
986  }
987 
988  const value_type& operator*() const noexcept {
989  return mData;
990  }
991 
992  value_type& operator*() noexcept {
993  return mData;
994  }
995 
996  template <typename VT = value_type>
997  ROBIN_HOOD(NODISCARD)
998  typename std::enable_if<is_map, typename VT::first_type&>::type getFirst() noexcept {
999  return mData.first;
1000  }
1001  template <typename VT = value_type>
1002  ROBIN_HOOD(NODISCARD)
1003  typename std::enable_if<is_set, VT&>::type getFirst() noexcept {
1004  return mData;
1005  }
1006 
1007  template <typename VT = value_type>
1008  ROBIN_HOOD(NODISCARD)
1009  typename std::enable_if<is_map, typename VT::first_type const&>::type
1010  getFirst() const noexcept {
1011  return mData.first;
1012  }
1013  template <typename VT = value_type>
1014  ROBIN_HOOD(NODISCARD)
1015  typename std::enable_if<is_set, VT const&>::type getFirst() const noexcept {
1016  return mData;
1017  }
1018 
1019  template <typename MT = mapped_type>
1020  ROBIN_HOOD(NODISCARD)
1021  typename std::enable_if<is_map, MT&>::type getSecond() noexcept {
1022  return mData.second;
1023  }
1024 
1025  template <typename MT = mapped_type>
1026  ROBIN_HOOD(NODISCARD)
1027  typename std::enable_if<is_set, MT const&>::type getSecond() const noexcept {
1028  return mData.second;
1029  }
1030 
1031  void swap(DataNode<M, true>& o) noexcept(
1032  noexcept(std::declval<value_type>().swap(std::declval<value_type>()))) {
1033  mData.swap(o.mData);
1034  }
1035 
1036  private:
1037  value_type mData;
1038  };
1039 
    // big object: allocate on heap.
    // The node only holds a pointer, so swap() is a cheap pointer swap.
    template <typename M>
    class DataNode<M, false> {
    public:
        // Gets storage from the map's node pool and constructs the value in place.
        template <typename... Args>
        explicit DataNode(M& map, Args&&... args)
            : mData(map.allocate()) {
            ::new (static_cast<void*>(mData)) value_type(std::forward<Args>(args)...);
        }

        // Move: steals the pointer, no allocation needed.
        DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, false>&& n) noexcept
            : mData(std::move(n.mData)) {}

        void destroy(M& map) noexcept {
            // don't deallocate, just put it into list of datapool.
            mData->~value_type();
            map.deallocate(mData);
        }

        // Destroys the value without returning its storage to the pool (used when
        // the whole arena is released at once).
        void destroyDoNotDeallocate() noexcept {
            mData->~value_type();
        }

        value_type const* operator->() const noexcept {
            return mData;
        }

        value_type* operator->() noexcept {
            return mData;
        }

        const value_type& operator*() const {
            return *mData;
        }

        value_type& operator*() {
            return *mData;
        }

        // getFirst(): key access. Map case returns pair.first, set case the stored value.
        template <typename VT = value_type>
        ROBIN_HOOD(NODISCARD)
        typename std::enable_if<is_map, typename VT::first_type&>::type getFirst() noexcept {
            return mData->first;
        }
        template <typename VT = value_type>
        ROBIN_HOOD(NODISCARD)
        typename std::enable_if<is_set, VT&>::type getFirst() noexcept {
            return *mData;
        }

        template <typename VT = value_type>
        ROBIN_HOOD(NODISCARD)
        typename std::enable_if<is_map, typename VT::first_type const&>::type
        getFirst() const noexcept {
            return mData->first;
        }
        template <typename VT = value_type>
        ROBIN_HOOD(NODISCARD)
        typename std::enable_if<is_set, VT const&>::type getFirst() const noexcept {
            return *mData;
        }

        template <typename MT = mapped_type>
        ROBIN_HOOD(NODISCARD)
        typename std::enable_if<is_map, MT&>::type getSecond() noexcept {
            return mData->second;
        }

        template <typename MT = mapped_type>
        ROBIN_HOOD(NODISCARD)
        typename std::enable_if<is_map, MT const&>::type getSecond() const noexcept {
            return mData->second;
        }

        // Cheap: swapping heap nodes is just a pointer swap.
        void swap(DataNode<M, false>& o) noexcept {
            using std::swap;
            swap(mData, o.mData);
        }

    private:
        value_type* mData;
    };
1122 
1123  using Node = DataNode<Self, IsFlat>;
1124 
    // helpers for insertKeyPrepareEmptySpot: extract first entry (only const required)
    ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(Node const& n) const noexcept {
        return n.getFirst();
    }

    // in case we have void mapped_type, we are not using a pair, thus we just route k through.
    // No need to disable this because it's just not used if not applicable.
    ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(key_type const& k) const noexcept {
        return k;
    }

    // in case we have non-void mapped_type, we have a standard robin_hood::pair
    // (enabled via SFINAE only for the map case, where value_type != key_type).
    template <typename Q = mapped_type>
    ROBIN_HOOD(NODISCARD)
    typename std::enable_if<!std::is_void<Q>::value, key_type const&>::type
    getFirstConst(value_type const& vt) const noexcept {
        return vt.first;
    }
1143 
    // Cloner //////////////////////////////////////////////////////////

    // Copies one table's payload into another; specialized on whether a raw
    // byte-wise copy is possible (flat & trivially copyable nodes).
    template <typename M, bool UseMemcpy>
    struct Cloner;

    // fast path: Just copy data, without allocating anything.
    template <typename M>
    struct Cloner<M, true> {
        void operator()(M const& source, M& target) const {
            // nodes and info bytes share one contiguous allocation starting at
            // mKeyVals, so a single byte copy clones everything (incl. sentinel).
            auto const* const src = reinterpret_cast<char const*>(source.mKeyVals);
            auto* tgt = reinterpret_cast<char*>(target.mKeyVals);
            auto const numElementsWithBuffer = target.calcNumElementsWithBuffer(target.mMask + 1);
            std::copy(src, src + target.calcNumBytesTotal(numElementsWithBuffer), tgt);
        }
    };

    // slow path: copy the info bytes, then copy-construct every occupied node.
    template <typename M>
    struct Cloner<M, false> {
        void operator()(M const& s, M& t) const {
            auto const numElementsWithBuffer = t.calcNumElementsWithBuffer(t.mMask + 1);
            std::copy(s.mInfo, s.mInfo + t.calcNumBytesInfo(numElementsWithBuffer), t.mInfo);

            for (size_t i = 0; i < numElementsWithBuffer; ++i) {
                if (t.mInfo[i]) {
                    // non-zero info byte => slot i is occupied in the source too.
                    ::new (static_cast<void*>(t.mKeyVals + i)) Node(t, *s.mKeyVals[i]);
                }
            }
        }
    };
1173 
    // Destroyer ///////////////////////////////////////////////////////

    // Destroys all nodes of a table; specialized so trivially destructible flat
    // nodes cost nothing beyond resetting the element count.
    template <typename M, bool IsFlatAndTrivial>
    struct Destroyer {};

    // trivial case: no destructors to run.
    template <typename M>
    struct Destroyer<M, true> {
        void nodes(M& m) const noexcept {
            m.mNumElements = 0;
        }

        void nodesDoNotDeallocate(M& m) const noexcept {
            m.mNumElements = 0;
        }
    };

    // general case: walk the info bytes and destroy every occupied node.
    template <typename M>
    struct Destroyer<M, false> {
        void nodes(M& m) const noexcept {
            m.mNumElements = 0;
            // clear also resets mInfo to 0, that's sometimes not necessary.
            auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1);

            for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) {
                if (0 != m.mInfo[idx]) {
                    Node& n = m.mKeyVals[idx];
                    // destroy() also returns heap-node storage to the pool.
                    n.destroy(m);
                    n.~Node();
                }
            }
        }

        // Same walk, but leaves pool storage alone (the arena is freed wholesale).
        void nodesDoNotDeallocate(M& m) const noexcept {
            m.mNumElements = 0;
            // clear also resets mInfo to 0, that's sometimes not necessary.
            auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1);
            for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) {
                if (0 != m.mInfo[idx]) {
                    Node& n = m.mKeyVals[idx];
                    n.destroyDoNotDeallocate();
                    n.~Node();
                }
            }
        }
    };
1219 
    // Iter ////////////////////////////////////////////////////////////

    // tag type: tells the Iter ctor to fast-forward to the first occupied slot.
    struct fast_forward_tag {};

    // generic iterator for both const_iterator and iterator.
    template <bool IsConst>
    class Iter {
    private:
        using NodePtr = typename std::conditional<IsConst, Node const*, Node*>::type;

    public:
        using difference_type = std::ptrdiff_t;
        using value_type = typename Self::value_type;
        using reference = typename std::conditional<IsConst, value_type const&, value_type&>::type;
        using pointer = typename std::conditional<IsConst, value_type const*, value_type*>::type;
        using iterator_category = std::forward_iterator_tag;

        // default constructed iterator can be compared to itself, but WON'T return true when
        // compared to end().
        Iter() = default;

        // Rule of zero: nothing specified. The conversion constructor is only enabled for
        // iterator to const_iterator, so it doesn't accidentally work as a copy ctor.

        // Conversion constructor from iterator to const_iterator.
        template <bool OtherIsConst,
                  typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
        Iter(Iter<OtherIsConst> const& other) noexcept
            : mKeyVals(other.mKeyVals)
            , mInfo(other.mInfo) {}

        Iter(NodePtr valPtr, uint8_t const* infoPtr) noexcept
            : mKeyVals(valPtr)
            , mInfo(infoPtr) {}

        // Constructs and immediately skips to the first occupied slot at or after valPtr.
        Iter(NodePtr valPtr, uint8_t const* infoPtr,
             fast_forward_tag ROBIN_HOOD_UNUSED(tag) /*unused*/) noexcept
            : mKeyVals(valPtr)
            , mInfo(infoPtr) {
            fastForward();
        }

        // Assignment from iterator to const_iterator (same SFINAE guard as above).
        template <bool OtherIsConst,
                  typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
        Iter& operator=(Iter<OtherIsConst> const& other) noexcept {
            mKeyVals = other.mKeyVals;
            mInfo = other.mInfo;
            return *this;
        }

        // prefix increment. Undefined behavior if we are at end()!
        Iter& operator++() noexcept {
            mInfo++;
            mKeyVals++;
            fastForward();
            return *this;
        }

        Iter operator++(int) noexcept {
            Iter tmp = *this;
            ++(*this);
            return tmp;
        }

        reference operator*() const {
            return **mKeyVals;
        }

        pointer operator->() const {
            return &**mKeyVals;
        }

        // Comparisons work between const and non-const iterators; position only.
        template <bool O>
        bool operator==(Iter<O> const& o) const noexcept {
            return mKeyVals == o.mKeyVals;
        }

        template <bool O>
        bool operator!=(Iter<O> const& o) const noexcept {
            return mKeyVals != o.mKeyVals;
        }

    private:
        // fast forward to the next non-free info byte
        // I've tried a few variants that don't depend on intrinsics, but unfortunately they are
        // quite a bit slower than this one. So I've reverted that change again. See map_benchmark.
        void fastForward() noexcept {
            size_t n = 0;
            // scan sizeof(size_t) info bytes at a time; the sentinel info byte (1)
            // at the end of mInfo guarantees this loop terminates.
            while (0U == (n = detail::unaligned_load<size_t>(mInfo))) {
                mInfo += sizeof(size_t);
                mKeyVals += sizeof(size_t);
            }
#if defined(ROBIN_HOOD_DISABLE_INTRINSICS)
            // we know for certain that within the next 8 bytes we'll find a non-zero one.
            if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint32_t>(mInfo))) {
                mInfo += 4;
                mKeyVals += 4;
            }
            if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint16_t>(mInfo))) {
                mInfo += 2;
                mKeyVals += 2;
            }
            if (ROBIN_HOOD_UNLIKELY(0U == *mInfo)) {
                mInfo += 1;
                mKeyVals += 1;
            }
#else
#    if ROBIN_HOOD(LITTLE_ENDIAN)
            auto inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8;
#    else
            auto inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8;
#    endif
            mInfo += inc;
            mKeyVals += inc;
#endif
        }

        friend class Table<IsFlat, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
        NodePtr mKeyVals{nullptr};
        uint8_t const* mInfo{nullptr};
    };
1341 
1343 
    // highly performance relevant code.
    // Lower bits are used for indexing into the array (2^n size)
    // The upper 1-5 bits need to be a reasonable good hash, to save comparisons.
    // Outputs both the start slot (*idx) and the initial info value (*info).
    template <typename HashKey>
    void keyToIdx(HashKey&& key, size_t* idx, InfoType* info) const {
        // In addition to whatever hash is used, add another mul & shift so we get better hashing.
        // This serves as a bad hash prevention, if the given data is
        // badly mixed.
        auto h = static_cast<uint64_t>(WHash::operator()(key));

        h *= mHashMultiplier;
        h ^= h >> 33U;

        // the lower InitialInfoNumBits are reserved for info.
        *info = mInfoInc + static_cast<InfoType>((h & InfoMask) >> mInfoHashShift);
        *idx = (static_cast<size_t>(h) >> InitialInfoNumBits) & mMask;
    }
1361 
1362  // forwards the index by one, wrapping around at the end
1363  void next(InfoType* info, size_t* idx) const noexcept {
1364  *idx = *idx + 1;
1365  *info += mInfoInc;
1366  }
1367 
1368  void nextWhileLess(InfoType* info, size_t* idx) const noexcept {
1369  // unrolling this by hand did not bring any speedups.
1370  while (*info < mInfo[*idx]) {
1371  next(info, idx);
1372  }
1373  }
1374 
    // Shift everything up by one element. Tries to move stuff around.
    // Moves the occupied run (insertion_idx, startIdx] one slot to the right to
    // open up insertion_idx, then bumps each shifted element's info byte.
    void
    shiftUp(size_t startIdx,
            size_t const insertion_idx) noexcept(std::is_nothrow_move_assignable<Node>::value) {
        auto idx = startIdx;
        // the topmost slot is raw memory, so it needs placement-new; the rest are
        // live nodes and can be move-assigned.
        ::new (static_cast<void*>(mKeyVals + idx)) Node(std::move(mKeyVals[idx - 1]));
        while (--idx != insertion_idx) {
            mKeyVals[idx] = std::move(mKeyVals[idx - 1]);
        }

        idx = startIdx;
        while (idx != insertion_idx) {
            ROBIN_HOOD_COUNT(shiftUp)
            mInfo[idx] = static_cast<uint8_t>(mInfo[idx - 1] + mInfoInc);
            if (ROBIN_HOOD_UNLIKELY(mInfo[idx] + mInfoInc > 0xFF)) {
                // info byte about to overflow: force a rehash on the next insert.
                mMaxNumElementsAllowed = 0;
            }
            --idx;
        }
    }
1395 
    // Closes the hole at idx after an erase: destroys the element there, then
    // moves displaced successors one slot left until an empty slot or an element
    // at its home position (info < 2 * mInfoInc) is reached.
    void shiftDown(size_t idx) noexcept(std::is_nothrow_move_assignable<Node>::value) {
        // until we find one that is either empty or has zero offset.
        // TODO(martinus) we don't need to move everything, just the last one for the same
        // bucket.
        mKeyVals[idx].destroy(*this);

        // until we find one that is either empty or has zero offset.
        while (mInfo[idx + 1] >= 2 * mInfoInc) {
            ROBIN_HOOD_COUNT(shiftDown)
            mInfo[idx] = static_cast<uint8_t>(mInfo[idx + 1] - mInfoInc);
            mKeyVals[idx] = std::move(mKeyVals[idx + 1]);
            ++idx;
        }

        mInfo[idx] = 0;
        // don't destroy, we've moved it
        // mKeyVals[idx].destroy(*this);
        mKeyVals[idx].~Node();
    }
1415 
    // copy of find(), except that it returns iterator instead of const_iterator.
    // Returns the slot index holding key, or the end() index if not found.
    template <typename Other>
    ROBIN_HOOD(NODISCARD)
    size_t findIdx(Other const& key) const {
        size_t idx{};
        InfoType info{};
        keyToIdx(key, &idx, &info);

        do {
            // unrolling this twice gives a bit of a speedup. More unrolling did not help.
            if (info == mInfo[idx] &&
                ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) {
                return idx;
            }
            next(&info, &idx);
            if (info == mInfo[idx] &&
                ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) {
                return idx;
            }
            next(&info, &idx);
        } while (info <= mInfo[idx]);

        // nothing found!
        // an empty map owns no allocation, so index 0 doubles as its end();
        // otherwise end() is the position of mInfo, right after the node array.
        return mMask == 0 ? 0
                          : static_cast<size_t>(std::distance(
                                mKeyVals, reinterpret_cast_no_cast_align_warning<Node*>(mInfo)));
    }
1443 
1444  void cloneData(const Table& o) {
1445  Cloner<Table, IsFlat && ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(Node)>()(o, *this);
1446  }
1447 
    // inserts a keyval that is guaranteed to be new, e.g. when the hashmap is resized.
    // @return True on success, false if something went wrong
    void insert_move(Node&& keyval) {
        // we don't retry, fail if overflowing
        // don't need to check max num elements
        if (0 == mMaxNumElementsAllowed && !try_increase_info()) {
            throwOverflowError();
        }

        size_t idx{};
        InfoType info{};
        keyToIdx(keyval.getFirst(), &idx, &info);

        // skip forward. Use <= because we are certain that the element is not there.
        while (info <= mInfo[idx]) {
            idx = idx + 1;
            info += mInfoInc;
        }

        // key not found, so we are now exactly where we want to insert it.
        auto const insertion_idx = idx;
        auto const insertion_info = static_cast<uint8_t>(info);
        if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
            // info byte close to overflow: force a rehash before the next insert.
            mMaxNumElementsAllowed = 0;
        }

        // find an empty spot
        while (0 != mInfo[idx]) {
            next(&info, &idx);
        }

        auto& l = mKeyVals[insertion_idx];
        if (idx == insertion_idx) {
            // the target slot itself was empty: construct into raw memory.
            ::new (static_cast<void*>(&l)) Node(std::move(keyval));
        } else {
            // make room by shifting the displaced run up, then move-assign.
            shiftUp(idx, insertion_idx);
            l = std::move(keyval);
        }

        // put at empty spot
        mInfo[insertion_idx] = insertion_info;

        ++mNumElements;
    }
1492 
public:
    using iterator = Iter<false>;
    using const_iterator = Iter<true>;

    // Default ctor. Does not allocate; allocation is deferred to the first insert.
    Table() noexcept(noexcept(Hash()) && noexcept(KeyEqual()))
        : WHash()
        , WKeyEqual() {
        ROBIN_HOOD_TRACE(this)
    }

    // Creates an empty hash map. Nothing is allocated yet, this happens at the first insert.
    // This tremendously speeds up ctor & dtor of a map that never receives an element. The
    // penalty is paid at the first insert, and not before. Lookup of this empty map works
    // because everybody points to DummyInfoByte::b. parameter bucket_count is dictated by the
    // standard, but we can ignore it.
    explicit Table(
        size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/, const Hash& h = Hash{},
        const KeyEqual& equal = KeyEqual{}) noexcept(noexcept(Hash(h)) && noexcept(KeyEqual(equal)))
        : WHash(h)
        , WKeyEqual(equal) {
        ROBIN_HOOD_TRACE(this)
    }

    // Range ctor: inserts every element of [first, last).
    template <typename Iter>
    Table(Iter first, Iter last, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0,
          const Hash& h = Hash{}, const KeyEqual& equal = KeyEqual{})
        : WHash(h)
        , WKeyEqual(equal) {
        ROBIN_HOOD_TRACE(this)
        insert(first, last);
    }

    // Initializer-list ctor: inserts every listed element.
    Table(std::initializer_list<value_type> initlist,
          size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash& h = Hash{},
          const KeyEqual& equal = KeyEqual{})
        : WHash(h)
        , WKeyEqual(equal) {
        ROBIN_HOOD_TRACE(this)
        insert(initlist.begin(), initlist.end());
    }
1533 
    // Move ctor: steals o's storage when o actually owns any (o.mMask != 0), then
    // resets o to the default empty state so its destructor is a no-op.
    Table(Table&& o) noexcept
        : WHash(std::move(static_cast<WHash&>(o)))
        , WKeyEqual(std::move(static_cast<WKeyEqual&>(o)))
        , DataPool(std::move(static_cast<DataPool&>(o))) {
        ROBIN_HOOD_TRACE(this)
        if (o.mMask) {
            mHashMultiplier = std::move(o.mHashMultiplier);
            mKeyVals = std::move(o.mKeyVals);
            mInfo = std::move(o.mInfo);
            mNumElements = std::move(o.mNumElements);
            mMask = std::move(o.mMask);
            mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
            mInfoInc = std::move(o.mInfoInc);
            mInfoHashShift = std::move(o.mInfoHashShift);
            // set other's mask to 0 so its destructor won't do anything
            o.init();
        }
    }
1552 
    // Move assignment: takes over o's storage if o owns any; otherwise just clears
    // *this. Self-assignment is a no-op.
    Table& operator=(Table&& o) noexcept {
        ROBIN_HOOD_TRACE(this)
        if (&o != this) {
            if (o.mMask) {
                // only move stuff if the other map actually has some data
                destroy();
                mHashMultiplier = std::move(o.mHashMultiplier);
                mKeyVals = std::move(o.mKeyVals);
                mInfo = std::move(o.mInfo);
                mNumElements = std::move(o.mNumElements);
                mMask = std::move(o.mMask);
                mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
                mInfoInc = std::move(o.mInfoInc);
                mInfoHashShift = std::move(o.mInfoHashShift);
                WHash::operator=(std::move(static_cast<WHash&>(o)));
                WKeyEqual::operator=(std::move(static_cast<WKeyEqual&>(o)));
                DataPool::operator=(std::move(static_cast<DataPool&>(o)));

                // reset o to the default empty state so its dtor is a no-op
                o.init();

            } else {
                // nothing in the other map => just clear us.
                clear();
            }
        }
        return *this;
    }
1580 
    // Copy ctor: allocates one block sized for o's layout and bulk-clones the
    // contents via Cloner (see cloneData).
    Table(const Table& o)
        : WHash(static_cast<const WHash&>(o))
        , WKeyEqual(static_cast<const WKeyEqual&>(o))
        , DataPool(static_cast<const DataPool&>(o)) {
        ROBIN_HOOD_TRACE(this)
        if (!o.empty()) {
            // not empty: create an exact copy. it is also possible to just iterate through all
            // elements and insert them, but copying is probably faster.

            auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1);
            auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);

            ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal("
                                          << numElementsWithBuffer << ")")
            mHashMultiplier = o.mHashMultiplier;
            mKeyVals = static_cast<Node*>(
                detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));
            // no need for calloc because cloneData does memcpy
            mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
            mNumElements = o.mNumElements;
            mMask = o.mMask;
            mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
            mInfoInc = o.mInfoInc;
            mInfoHashShift = o.mInfoHashShift;
            cloneData(o);
        }
    }
1608 
    // Creates a copy of the given map. Copy constructor of each entry is used.
    // Not sure why clang-tidy thinks this doesn't handle self assignment, it does
    // Reuses the existing allocation when both maps have the same array size.
    Table& operator=(Table const& o) {
        ROBIN_HOOD_TRACE(this)
        if (&o == this) {
            // prevent assigning of itself
            return *this;
        }

        // we keep using the old allocator and not assign the new one, because we want to keep
        // the memory available. when it is the same size.
        if (o.empty()) {
            if (0 == mMask) {
                // nothing to do, we are empty too
                return *this;
            }

            // not empty: destroy what we have there
            // clear also resets mInfo to 0, that's sometimes not necessary.
            destroy();
            init();
            WHash::operator=(static_cast<const WHash&>(o));
            WKeyEqual::operator=(static_cast<const WKeyEqual&>(o));
            DataPool::operator=(static_cast<DataPool const&>(o));

            return *this;
        }

        // clean up old stuff
        Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}.nodes(*this);

        if (mMask != o.mMask) {
            // no luck: we don't have the same array size allocated, so we need to realloc.
            if (0 != mMask) {
                // only deallocate if we actually have data!
                ROBIN_HOOD_LOG("std::free")
                std::free(mKeyVals);
            }

            auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1);
            auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);
            ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal("
                                          << numElementsWithBuffer << ")")
            mKeyVals = static_cast<Node*>(
                detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));

            // no need for calloc here because cloneData performs a memcpy.
            mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
            // sentinel is set in cloneData
        }
        WHash::operator=(static_cast<const WHash&>(o));
        WKeyEqual::operator=(static_cast<const WKeyEqual&>(o));
        DataPool::operator=(static_cast<DataPool const&>(o));
        mHashMultiplier = o.mHashMultiplier;
        mNumElements = o.mNumElements;
        mMask = o.mMask;
        mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
        mInfoInc = o.mInfoInc;
        mInfoHashShift = o.mInfoHashShift;
        cloneData(o);

        return *this;
    }
1672 
1673  // Swaps everything between the two maps.
1674  void swap(Table& o) {
1675  ROBIN_HOOD_TRACE(this)
1676  using std::swap;
1677  swap(o, *this);
1678  }
1679 
    // Clears all data, without resizing.
    void clear() {
        ROBIN_HOOD_TRACE(this)
        if (empty()) {
            // don't do anything! also important because we don't want to write to
            // DummyInfoByte::b, even though we would just write 0 to it.
            return;
        }

        Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}.nodes(*this);

        auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);
        // clear everything, then set the sentinel again
        uint8_t const z = 0;
        std::fill(mInfo, mInfo + calcNumBytesInfo(numElementsWithBuffer), z);
        // sentinel (1) lets iterators stop at end() without an index check
        mInfo[numElementsWithBuffer] = 1;

        mInfoInc = InitialInfoInc;
        mInfoHashShift = InitialInfoHashShift;
    }
1700 
    // Destroys the map and all it's contents.
    ~Table() {
        ROBIN_HOOD_TRACE(this)
        destroy();
    }
1706 
1707  // Checks if both tables contain the same entries. Order is irrelevant.
1708  bool operator==(const Table& other) const {
1709  ROBIN_HOOD_TRACE(this)
1710  if (other.size() != size()) {
1711  return false;
1712  }
1713  for (auto const& otherEntry : other) {
1714  if (!has(otherEntry)) {
1715  return false;
1716  }
1717  }
1718 
1719  return true;
1720  }
1721 
1722  bool operator!=(const Table& other) const {
1723  ROBIN_HOOD_TRACE(this)
1724  return !operator==(other);
1725  }
1726 
    // operator[]: returns a reference to the mapped value for key, inserting a
    // default-constructed value if the key is absent. Enabled for maps only
    // (non-void mapped_type).
    template <typename Q = mapped_type>
    typename std::enable_if<!std::is_void<Q>::value, Q&>::type operator[](const key_type& key) {
        ROBIN_HOOD_TRACE(this)
        auto idxAndState = insertKeyPrepareEmptySpot(key);
        switch (idxAndState.second) {
        case InsertionState::key_found:
            break;

        case InsertionState::new_node:
            // raw slot: placement-new a node with a default-constructed mapped value.
            ::new (static_cast<void*>(&mKeyVals[idxAndState.first]))
                Node(*this, std::piecewise_construct, std::forward_as_tuple(key),
                     std::forward_as_tuple());
            break;

        case InsertionState::overwrite_node:
            // slot already holds a node object: assign over it.
            mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
                                               std::forward_as_tuple(key), std::forward_as_tuple());
            break;

        case InsertionState::overflow_error:
            throwOverflowError();
        }

        return mKeyVals[idxAndState.first].getSecond();
    }

    // rvalue-key overload: moves the key into the newly created node.
    template <typename Q = mapped_type>
    typename std::enable_if<!std::is_void<Q>::value, Q&>::type operator[](key_type&& key) {
        ROBIN_HOOD_TRACE(this)
        auto idxAndState = insertKeyPrepareEmptySpot(key);
        switch (idxAndState.second) {
        case InsertionState::key_found:
            break;

        case InsertionState::new_node:
            ::new (static_cast<void*>(&mKeyVals[idxAndState.first]))
                Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)),
                     std::forward_as_tuple());
            break;

        case InsertionState::overwrite_node:
            mKeyVals[idxAndState.first] =
                Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)),
                     std::forward_as_tuple());
            break;

        case InsertionState::overflow_error:
            throwOverflowError();
        }

        return mKeyVals[idxAndState.first].getSecond();
    }
1779 
1780  template <typename Iter>
1781  void insert(Iter first, Iter last) {
1782  for (; first != last; ++first) {
1783  // value_type ctor needed because this might be called with std::pair's
1784  insert(value_type(*first));
1785  }
1786  }
1787 
1788  void insert(std::initializer_list<value_type> ilist) {
1789  for (auto&& vt : ilist) {
1790  insert(std::move(vt));
1791  }
1792  }
1793 
    // Constructs a node from args, then inserts it unless its key already exists.
    // Returns {iterator to the element, true if it was newly inserted}.
    template <typename... Args>
    std::pair<iterator, bool> emplace(Args&&... args) {
        ROBIN_HOOD_TRACE(this)
        // the node must be built first so its key can be extracted for the lookup
        Node n{*this, std::forward<Args>(args)...};
        auto idxAndState = insertKeyPrepareEmptySpot(getFirstConst(n));
        switch (idxAndState.second) {
        case InsertionState::key_found:
            // duplicate key: discard the temporary node again.
            n.destroy(*this);
            break;

        case InsertionState::new_node:
            ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(*this, std::move(n));
            break;

        case InsertionState::overwrite_node:
            mKeyVals[idxAndState.first] = std::move(n);
            break;

        case InsertionState::overflow_error:
            n.destroy(*this);
            throwOverflowError();
            break;
        }

        return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
                              InsertionState::key_found != idxAndState.second);
    }
1821 
1822  template <typename... Args>
1823  iterator emplace_hint(const_iterator position, Args&&... args) {
1824  (void)position;
1825  return emplace(std::forward<Args>(args)...).first;
1826  }
1827 
1828  template <typename... Args>
1829  std::pair<iterator, bool> try_emplace(const key_type& key, Args&&... args) {
1830  return try_emplace_impl(key, std::forward<Args>(args)...);
1831  }
1832 
    // Rvalue-key overload: the key is moved into the new element if insertion happens.
    template <typename... Args>
    std::pair<iterator, bool> try_emplace(key_type&& key, Args&&... args) {
        return try_emplace_impl(std::move(key), std::forward<Args>(args)...);
    }
1837 
    // Hint-taking overload for interface compatibility; the hint is ignored.
    template <typename... Args>
    iterator try_emplace(const_iterator hint, const key_type& key, Args&&... args) {
        (void)hint;
        return try_emplace_impl(key, std::forward<Args>(args)...).first;
    }
1843 
    // Hint-taking rvalue-key overload; the hint is ignored.
    template <typename... Args>
    iterator try_emplace(const_iterator hint, key_type&& key, Args&&... args) {
        (void)hint;
        return try_emplace_impl(std::move(key), std::forward<Args>(args)...).first;
    }
1849 
    // Inserts {key, obj}, or assigns obj to the mapped value if key already exists.
    template <typename Mapped>
    std::pair<iterator, bool> insert_or_assign(const key_type& key, Mapped&& obj) {
        return insertOrAssignImpl(key, std::forward<Mapped>(obj));
    }
1854 
    // Rvalue-key overload: the key is moved on insertion.
    template <typename Mapped>
    std::pair<iterator, bool> insert_or_assign(key_type&& key, Mapped&& obj) {
        return insertOrAssignImpl(std::move(key), std::forward<Mapped>(obj));
    }
1859 
    // Hint-taking overload for interface compatibility; the hint is ignored.
    template <typename Mapped>
    iterator insert_or_assign(const_iterator hint, const key_type& key, Mapped&& obj) {
        (void)hint;
        return insertOrAssignImpl(key, std::forward<Mapped>(obj)).first;
    }
1865 
    // Hint-taking rvalue-key overload; the hint is ignored.
    template <typename Mapped>
    iterator insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) {
        (void)hint;
        return insertOrAssignImpl(std::move(key), std::forward<Mapped>(obj)).first;
    }
1871 
    // Copy-inserts keyval; no-op (returns false) if the key already exists.
    std::pair<iterator, bool> insert(const value_type& keyval) {
        ROBIN_HOOD_TRACE(this)
        return emplace(keyval);
    }
1876 
    // Hint-taking copy-insert; the hint is ignored.
    iterator insert(const_iterator hint, const value_type& keyval) {
        (void)hint;
        return emplace(keyval).first;
    }
1881 
    // Move-inserts keyval; no-op (returns false) if the key already exists.
    std::pair<iterator, bool> insert(value_type&& keyval) {
        return emplace(std::move(keyval));
    }
1885 
    // Hint-taking move-insert; the hint is ignored.
    iterator insert(const_iterator hint, value_type&& keyval) {
        (void)hint;
        return emplace(std::move(keyval)).first;
    }
1890 
    // Returns 1 if key is found, 0 otherwise.
    size_t count(const key_type& key) const { // NOLINT
        ROBIN_HOOD_TRACE(this)
        auto kv = mKeyVals + findIdx(key);
        // a result aliasing mInfo is the sentinel slot, i.e. "not found"
        if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
            return 1;
        }
        return 0;
    }
1900 
    // Heterogeneous-key count, enabled only when hash & equality are transparent.
    template <typename OtherKey, typename Self_ = Self>
    typename std::enable_if<Self_::is_transparent, size_t>::type count(const OtherKey& key) const {
        ROBIN_HOOD_TRACE(this)
        auto kv = mKeyVals + findIdx(key);
        // a result aliasing mInfo is the sentinel slot, i.e. "not found"
        if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
            return 1;
        }
        return 0;
    }
1910 
1911  bool contains(const key_type& key) const { // NOLINT
1912  return 1U == count(key);
1913  }
1914 
1915  template <typename OtherKey, typename Self_ = Self>
1916  typename std::enable_if<Self_::is_transparent, bool>::type contains(const OtherKey& key) const {
1917  return 1U == count(key);
1918  }
1919 
    // Returns a reference to the value found for key.
    // Throws std::out_of_range if element cannot be found
    template <typename Q = mapped_type>
    typename std::enable_if<!std::is_void<Q>::value, Q&>::type at(key_type const& key) {
        ROBIN_HOOD_TRACE(this)
        auto kv = mKeyVals + findIdx(key);
        // pointing at the sentinel slot (aliasing mInfo) means the key was not found
        if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
            doThrow<std::out_of_range>("key not found");
        }
        return kv->getSecond();
    }
1931 
    // Returns a reference to the value found for key.
    // Throws std::out_of_range if element cannot be found
    template <typename Q = mapped_type>
    typename std::enable_if<!std::is_void<Q>::value, Q const&>::type at(key_type const& key) const {
        ROBIN_HOOD_TRACE(this)
        auto kv = mKeyVals + findIdx(key);
        // pointing at the sentinel slot (aliasing mInfo) means the key was not found
        if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
            doThrow<std::out_of_range>("key not found");
        }
        return kv->getSecond();
    }
1943 
    // Returns a const_iterator to the element with the given key, or end() if not found.
    const_iterator find(const key_type& key) const { // NOLINT
        ROBIN_HOOD_TRACE(this)
        const size_t idx = findIdx(key);
        return const_iterator{mKeyVals + idx, mInfo + idx};
    }
1949 
    // Tag-dispatched heterogeneous find (const); returns end() if the key is not found.
    template <typename OtherKey>
    const_iterator find(const OtherKey& key, is_transparent_tag /*unused*/) const {
        ROBIN_HOOD_TRACE(this)
        const size_t idx = findIdx(key);
        return const_iterator{mKeyVals + idx, mInfo + idx};
    }
1956 
    // Heterogeneous find (const), enabled only when hash & equality are transparent.
    template <typename OtherKey, typename Self_ = Self>
    typename std::enable_if<Self_::is_transparent, // NOLINT
                            const_iterator>::type  // NOLINT
    find(const OtherKey& key) const {              // NOLINT
        ROBIN_HOOD_TRACE(this)
        const size_t idx = findIdx(key);
        return const_iterator{mKeyVals + idx, mInfo + idx};
    }
1965 
    // Returns an iterator to the element with the given key, or end() if not found.
    iterator find(const key_type& key) {
        ROBIN_HOOD_TRACE(this)
        const size_t idx = findIdx(key);
        return iterator{mKeyVals + idx, mInfo + idx};
    }
1971 
    // Tag-dispatched heterogeneous find; returns end() if the key is not found.
    template <typename OtherKey>
    iterator find(const OtherKey& key, is_transparent_tag/*unused*/) {
        ROBIN_HOOD_TRACE(this)
        const size_t idx = findIdx(key);
        return iterator{mKeyVals + idx, mInfo + idx};
    }
1978 
    // Heterogeneous find, enabled only when hash & equality are transparent.
    template <typename OtherKey, typename Self_ = Self>
    typename std::enable_if<Self_::is_transparent, iterator>::type find(const OtherKey& key) {
        ROBIN_HOOD_TRACE(this)
        const size_t idx = findIdx(key);
        return iterator{mKeyVals + idx, mInfo + idx};
    }
1985 
    // Iterator to the first element, or end() for an empty map.
    iterator begin() {
        ROBIN_HOOD_TRACE(this)
        if (empty()) {
            return end();
        }
        // fast_forward_tag: the iterator skips ahead to the first occupied slot
        return iterator(mKeyVals, mInfo, fast_forward_tag{});
    }
    // Const overload; forwards to cbegin().
    const_iterator begin() const { // NOLINT
        ROBIN_HOOD_TRACE(this)
        return cbegin();
    }
    // Const iterator to the first element, or cend() for an empty map.
    const_iterator cbegin() const { // NOLINT
        ROBIN_HOOD_TRACE(this)
        if (empty()) {
            return cend();
        }
        // fast_forward_tag: the iterator skips ahead to the first occupied slot
        return const_iterator(mKeyVals, mInfo, fast_forward_tag{});
    }
2004 
    iterator end() {
        ROBIN_HOOD_TRACE(this)
        // no need to supply valid info pointer: end() must not be dereferenced, and only node
        // pointer is compared. mInfo doubles as the one-past-the-end node address.
        return iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr};
    }
    // Const overload; forwards to cend().
    const_iterator end() const { // NOLINT
        ROBIN_HOOD_TRACE(this)
        return cend();
    }
    // Past-the-end const iterator; only its node pointer is ever compared.
    const_iterator cend() const { // NOLINT
        ROBIN_HOOD_TRACE(this)
        return const_iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr};
    }
2019 
    // Erase via const_iterator: delegates to the mutable-iterator overload.
    iterator erase(const_iterator pos) {
        ROBIN_HOOD_TRACE(this)
        // its safe to perform const cast here: this map itself is non-const
        return erase(iterator{const_cast<Node*>(pos.mKeyVals), const_cast<uint8_t*>(pos.mInfo)});
    }
2025 
    // Erases element at pos, returns iterator to the next element.
    iterator erase(iterator pos) {
        ROBIN_HOOD_TRACE(this)
        // we assume that pos always points to a valid entry, and not end().
        auto const idx = static_cast<size_t>(pos.mKeyVals - mKeyVals);

        // backward-shift deletion: pulls following displaced entries into the gap
        shiftDown(idx);
        --mNumElements;

        if (*pos.mInfo) {
            // we've backward shifted, so a different element now occupies this slot:
            // return this position again
            return pos;
        }

        // no backward shift, return next element
        return ++pos;
    }
2043 
    // Erases the element with the given key, if present.
    // Returns the number of elements removed (0 or 1).
    size_t erase(const key_type& key) {
        ROBIN_HOOD_TRACE(this)
        size_t idx{};
        InfoType info{};
        keyToIdx(key, &idx, &info);

        // check while info matches with the source idx
        do {
            if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
                shiftDown(idx);
                --mNumElements;
                return 1;
            }
            next(&info, &idx);
        } while (info <= mInfo[idx]);

        // nothing found to delete
        return 0;
    }
2063 
    // reserves space for the specified number of elements. Makes sure the old data fits.
    // exactly the same as reserve(c).
    void rehash(size_t c) {
        // forces a reserve (rehashes even when no growth is needed)
        reserve(c, true);
    }
2070 
    // reserves space for the specified number of elements. Makes sure the old data fits.
    // Exactly the same as rehash(c). Use rehash(0) to shrink to fit.
    void reserve(size_t c) {
        // reserve, but don't force rehash
        reserve(c, false);
    }
2077 
    // If possible reallocates the map to a smaller one. This frees the underlying table.
    // Does not do anything if load_factor is too large for decreasing the table's size.
    void compact() {
        ROBIN_HOOD_TRACE(this)
        // find the smallest power-of-two table size that still fits all current elements
        auto newSize = InitialNumElements;
        while (calcMaxNumElementsAllowed(newSize) < mNumElements && newSize != 0) {
            newSize *= 2;
        }
        // newSize == 0 means the doubling overflowed
        if (ROBIN_HOOD_UNLIKELY(newSize == 0)) {
            throwOverflowError();
        }

        ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1")

        // only actually do anything when the new size is smaller than the current one;
        // forceFree=true releases the old table instead of pooling it.
        if (newSize < mMask + 1) {
            rehashPowerOfTwo(newSize, true);
        }
    }
2098 
    // Number of elements currently stored.
    size_type size() const noexcept { // NOLINT
        ROBIN_HOOD_TRACE(this)
        return mNumElements;
    }
2103 
    // Theoretical maximum number of elements (all bits of size_type set).
    size_type max_size() const noexcept { // NOLINT
        ROBIN_HOOD_TRACE(this)
        return static_cast<size_type>(-1);
    }
2108 
    // True when the map holds no elements.
    ROBIN_HOOD(NODISCARD) bool empty() const noexcept {
        ROBIN_HOOD_TRACE(this)
        return 0 == mNumElements;
    }
2113 
    // Fixed at compile time via the MaxLoadFactor100 template parameter.
    float max_load_factor() const noexcept { // NOLINT
        ROBIN_HOOD_TRACE(this)
        return MaxLoadFactor100 / 100.0F;
    }
2118 
    // Average number of elements per bucket. Since we allow only 1 per bucket
    // this is size() divided by the bucket count (mMask + 1).
    float load_factor() const noexcept { // NOLINT
        ROBIN_HOOD_TRACE(this)
        return static_cast<float>(size()) / static_cast<float>(mMask + 1);
    }
2124 
    // Bucket index mask (bucket count - 1, since the count is a power of two).
    ROBIN_HOOD(NODISCARD) size_t mask() const noexcept {
        ROBIN_HOOD_TRACE(this)
        return mMask;
    }
2129 
2130  ROBIN_HOOD(NODISCARD) size_t calcMaxNumElementsAllowed(size_t maxElements) const noexcept {
2131  if (ROBIN_HOOD_LIKELY(maxElements <= (std::numeric_limits<size_t>::max)() / 100)) {
2132  return maxElements * MaxLoadFactor100 / 100;
2133  }
2134 
2135  // we might be a bit imprecise, but since maxElements is quite large that doesn't matter
2136  return (maxElements / 100) * MaxLoadFactor100;
2137  }
2138 
    // Bytes needed for the info array of a table with numElements slots.
    ROBIN_HOOD(NODISCARD) size_t calcNumBytesInfo(size_t numElements) const noexcept {
        // we add a uint64_t, which houses the sentinel (first byte) and padding so we can load
        // 64bit types.
        return numElements + sizeof(uint64_t);
    }
2144 
    // Slot count including the overflow buffer that probing may run into past the
    // nominal table end (capped at 0xFF extra slots).
    ROBIN_HOOD(NODISCARD)
    size_t calcNumElementsWithBuffer(size_t numElements) const noexcept {
        auto maxNumElementsAllowed = calcMaxNumElementsAllowed(numElements);
        return numElements + (std::min)(maxNumElementsAllowed, (static_cast<size_t>(0xFF)));
    }
2150 
    // calculation only allowed for 2^n values. Total allocation size for the node array
    // plus the trailing info array; throws on 32bit overflow.
    ROBIN_HOOD(NODISCARD) size_t calcNumBytesTotal(size_t numElements) const {
#if ROBIN_HOOD(BITNESS) == 64
        return numElements * sizeof(Node) + calcNumBytesInfo(numElements);
#else
        // make sure we're doing 64bit operations, so we are at least safe against 32bit overflows.
        auto const ne = static_cast<uint64_t>(numElements);
        auto const s = static_cast<uint64_t>(sizeof(Node));
        auto const infos = static_cast<uint64_t>(calcNumBytesInfo(numElements));

        auto const total64 = ne * s + infos;
        auto const total = static_cast<size_t>(total64);

        // a round-trip mismatch means the size does not fit into size_t
        if (ROBIN_HOOD_UNLIKELY(static_cast<uint64_t>(total) != total64)) {
            throwOverflowError();
        }
        return total;
#endif
    }
2170 
private:
    // True if an element equal to e (key AND mapped value) exists. Map variant.
    template <typename Q = mapped_type>
    ROBIN_HOOD(NODISCARD)
    typename std::enable_if<!std::is_void<Q>::value, bool>::type has(const value_type& e) const {
        ROBIN_HOOD_TRACE(this)
        auto it = find(e.first);
        return it != end() && it->second == e.second;
    }
2179 
    // True if the element e exists. Set variant (mapped_type is void).
    template <typename Q = mapped_type>
    ROBIN_HOOD(NODISCARD)
    typename std::enable_if<std::is_void<Q>::value, bool>::type has(const value_type& e) const {
        ROBIN_HOOD_TRACE(this)
        return find(e) != end();
    }
2186 
    // Grows the table so that at least c elements (and all current ones) fit.
    // With forceRehash set, rehashes even when the table is already large enough.
    void reserve(size_t c, bool forceRehash) {
        ROBIN_HOOD_TRACE(this)
        auto const minElementsAllowed = (std::max)(c, mNumElements);
        // find the smallest power-of-two size that satisfies the load factor
        auto newSize = InitialNumElements;
        while (calcMaxNumElementsAllowed(newSize) < minElementsAllowed && newSize != 0) {
            newSize *= 2;
        }
        // newSize == 0 means the doubling overflowed
        if (ROBIN_HOOD_UNLIKELY(newSize == 0)) {
            throwOverflowError();
        }

        ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1")

        // only actually do anything when the new size is bigger than the old one. This prevents to
        // continuously allocate for each reserve() call.
        if (forceRehash || newSize > mMask + 1) {
            rehashPowerOfTwo(newSize, false);
        }
    }
2206 
    // Rehashes all elements into a freshly allocated table with numBuckets buckets.
    // Only works if numBuckets is a power of two. With forceFree set the old table is
    // released with std::free instead of being handed to the DataPool.
    void rehashPowerOfTwo(size_t numBuckets, bool forceFree) {
        ROBIN_HOOD_TRACE(this)

        Node* const oldKeyVals = mKeyVals;
        uint8_t const* const oldInfo = mInfo;

        const size_t oldMaxElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);

        // resize operation: move stuff
        initData(numBuckets);
        // oldMaxElementsWithBuffer <= 1 means the map was still in its empty initial
        // state (mKeyVals pointing at mMask), so there is nothing to move or free.
        if (oldMaxElementsWithBuffer > 1) {
            for (size_t i = 0; i < oldMaxElementsWithBuffer; ++i) {
                if (oldInfo[i] != 0) {
                    // might throw an exception, which is really bad since we are in the middle of
                    // moving stuff.
                    insert_move(std::move(oldKeyVals[i]));
                    // destroy the node but DON'T destroy the data.
                    oldKeyVals[i].~Node();
                }
            }

            // this check is not necessary as it's guarded by the previous if, but it helps
            // silence g++'s overeager "attempt to free a non-heap object 'map'
            // [-Werror=free-nonheap-object]" warning.
            if (oldKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) {
                // don't destroy old data: put it into the pool instead
                if (forceFree) {
                    std::free(oldKeyVals);
                } else {
                    DataPool::addOrFree(oldKeyVals, calcNumBytesTotal(oldMaxElementsWithBuffer));
                }
            }
        }
    }
2244 
    // Reports that the table cannot grow any further; aborts when exceptions are disabled.
    ROBIN_HOOD(NOINLINE) void throwOverflowError() const {
#if ROBIN_HOOD(HAS_EXCEPTIONS)
        throw std::overflow_error("robin_hood::map overflow");
#else
        abort();
#endif
    }
2252 
    // Shared implementation of the try_emplace overloads: constructs the mapped value
    // from args only if the key is not yet present; an existing value is left untouched.
    // Returns {iterator to the element, true iff an insertion took place}.
    template <typename OtherKey, typename... Args>
    std::pair<iterator, bool> try_emplace_impl(OtherKey&& key, Args&&... args) {
        ROBIN_HOOD_TRACE(this)
        auto idxAndState = insertKeyPrepareEmptySpot(key);
        switch (idxAndState.second) {
        case InsertionState::key_found:
            // key exists: per try_emplace semantics, do nothing
            break;

        case InsertionState::new_node:
            // raw slot: placement-new the node, piecewise-constructing key and value
            ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(
                *this, std::piecewise_construct, std::forward_as_tuple(std::forward<OtherKey>(key)),
                std::forward_as_tuple(std::forward<Args>(args)...));
            break;

        case InsertionState::overwrite_node:
            // slot holds a shifted-out node: assign a freshly built one over it
            mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
                                               std::forward_as_tuple(std::forward<OtherKey>(key)),
                                               std::forward_as_tuple(std::forward<Args>(args)...));
            break;

        case InsertionState::overflow_error:
            throwOverflowError();
            break;
        }

        return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
                              InsertionState::key_found != idxAndState.second);
    }
2281 
    // Shared implementation of the insert_or_assign overloads: inserts {key, obj} or,
    // if the key exists, assigns obj to the existing mapped value.
    // Returns {iterator to the element, true iff an insertion took place}.
    template <typename OtherKey, typename Mapped>
    std::pair<iterator, bool> insertOrAssignImpl(OtherKey&& key, Mapped&& obj) {
        ROBIN_HOOD_TRACE(this)
        auto idxAndState = insertKeyPrepareEmptySpot(key);
        switch (idxAndState.second) {
        case InsertionState::key_found:
            // key exists: overwrite only the mapped value
            mKeyVals[idxAndState.first].getSecond() = std::forward<Mapped>(obj);
            break;

        case InsertionState::new_node:
            // raw slot: placement-new the node, piecewise-constructing key and value
            ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(
                *this, std::piecewise_construct, std::forward_as_tuple(std::forward<OtherKey>(key)),
                std::forward_as_tuple(std::forward<Mapped>(obj)));
            break;

        case InsertionState::overwrite_node:
            // slot holds a shifted-out node: assign a freshly built one over it
            mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
                                               std::forward_as_tuple(std::forward<OtherKey>(key)),
                                               std::forward_as_tuple(std::forward<Mapped>(obj)));
            break;

        case InsertionState::overflow_error:
            throwOverflowError();
            break;
        }

        return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
                              InsertionState::key_found != idxAndState.second);
    }
2311 
    // Allocates and initializes an empty table with max_elements buckets (a power of two).
    // Any previously held storage must already have been dealt with by the caller.
    void initData(size_t max_elements) {
        mNumElements = 0;
        mMask = max_elements - 1;
        mMaxNumElementsAllowed = calcMaxNumElementsAllowed(max_elements);

        auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements);

        // malloc & zero mInfo. Faster than calloc everything.
        auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);
        ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal("
                                      << numElementsWithBuffer << ")")
        mKeyVals = reinterpret_cast<Node*>(
            detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));
        // the info byte array lives directly behind the node array in the same allocation
        mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
        std::memset(mInfo, 0, numBytesTotal - numElementsWithBuffer * sizeof(Node));

        // set sentinel (non-zero info byte one past the buffer so iteration terminates)
        mInfo[numElementsWithBuffer] = 1;

        mInfoInc = InitialInfoInc;
        mInfoHashShift = InitialInfoHashShift;
    }
2334 
    // Outcome of insertKeyPrepareEmptySpot(): tells the caller whether/how to construct the node.
    enum class InsertionState { overflow_error, key_found, new_node, overwrite_node };
2336 
    // Finds key, and if not already present prepares a spot where to pot the key & value.
    // This potentially shifts nodes out of the way, updates mInfo and number of inserted
    // elements, so the only operation left to do is create/assign a new node at that spot.
    template <typename OtherKey>
    std::pair<size_t, InsertionState> insertKeyPrepareEmptySpot(OtherKey&& key) {
        // retry loop: each failed pass grows/rehashes via increase_size(); 256 attempts
        // bounds the work before giving up with overflow_error.
        for (int i = 0; i < 256; ++i) {
            size_t idx{};
            InfoType info{};
            keyToIdx(key, &idx, &info);
            nextWhileLess(&info, &idx);

            // while we potentially have a match
            while (info == mInfo[idx]) {
                if (WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
                    // key already exists, do NOT insert.
                    // see http://en.cppreference.com/w/cpp/container/unordered_map/insert
                    return std::make_pair(idx, InsertionState::key_found);
                }
                next(&info, &idx);
            }

            // unlikely that this evaluates to true
            if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) {
                if (!increase_size()) {
                    return std::make_pair(size_t(0), InsertionState::overflow_error);
                }
                // table was grown: restart the probe from scratch
                continue;
            }

            // key not found, so we are now exactly where we want to insert it.
            auto const insertion_idx = idx;
            auto const insertion_info = info;
            if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
                // info byte would overflow on the next displacement: force a rehash
                // on the next insert by zeroing the allowance.
                mMaxNumElementsAllowed = 0;
            }

            // find an empty spot
            while (0 != mInfo[idx]) {
                next(&info, &idx);
            }

            // robin-hood shift: move the occupants between insertion point and the
            // empty slot one position up.
            if (idx != insertion_idx) {
                shiftUp(idx, insertion_idx);
            }
            // put at empty spot
            mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info);
            ++mNumElements;
            return std::make_pair(insertion_idx, idx == insertion_idx
                                                     ? InsertionState::new_node
                                                     : InsertionState::overwrite_node);
        }

        // enough attempts failed, so finally give up.
        return std::make_pair(size_t(0), InsertionState::overflow_error);
    }
2392 
    // Tries to make room in the info bytes by halving mInfoInc (dropping one hash bit),
    // instead of growing the table. Returns false when mInfoInc cannot shrink further.
    bool try_increase_info() {
        ROBIN_HOOD_LOG("mInfoInc=" << mInfoInc << ", numElements=" << mNumElements
                                   << ", maxNumElementsAllowed="
                                   << calcMaxNumElementsAllowed(mMask + 1))
        if (mInfoInc <= 2) {
            // need to be > 2 so that shift works (otherwise undefined behavior!)
            return false;
        }
        // we got space left, try to make info smaller
        mInfoInc = static_cast<uint8_t>(mInfoInc >> 1U);

        // remove one bit of the hash, leaving more space for the distance info.
        // This is extremely fast because we can operate on 8 bytes at once.
        ++mInfoHashShift;
        auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);

        for (size_t i = 0; i < numElementsWithBuffer; i += 8) {
            auto val = unaligned_load<uint64_t>(mInfo + i);
            val = (val >> 1U) & UINT64_C(0x7f7f7f7f7f7f7f7f);
            std::memcpy(mInfo + i, &val, sizeof(val));
        }
        // update sentinel, which might have been cleared out!
        mInfo[numElementsWithBuffer] = 1;

        mMaxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
        return true;
    }
2420 
    // True if resize was possible, false otherwise
    bool increase_size() {
        // nothing allocated yet? just allocate InitialNumElements
        if (0 == mMask) {
            initData(InitialNumElements);
            return true;
        }

        auto const maxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
        // cheaper than growing: try freeing up info bits first
        if (mNumElements < maxNumElementsAllowed && try_increase_info()) {
            return true;
        }

        ROBIN_HOOD_LOG("mNumElements=" << mNumElements << ", maxNumElementsAllowed="
                                       << maxNumElementsAllowed << ", load="
                                       << (static_cast<double>(mNumElements) * 100.0 /
                                           (static_cast<double>(mMask) + 1)))

        if (mNumElements * 2 < calcMaxNumElementsAllowed(mMask + 1)) {
            // we have to resize, even though there would still be plenty of space left!
            // Try to rehash instead. Delete freed memory so we don't steadily increase mem in case
            // we have to rehash a few times
            nextHashMultiplier();
            rehashPowerOfTwo(mMask + 1, true);
        } else {
            // we've reached the capacity of the map, so the hash seems to work nice. Keep using it.
            rehashPowerOfTwo((mMask + 1) * 2, false);
        }
        return true;
    }
2451 
    // Switches to a different hash multiplier, used when the current one clusters badly.
    void nextHashMultiplier() {
        // adding an *even* number, so that the multiplier will always stay odd. This is necessary
        // so that the hash stays a mixing function (and thus doesn't have any information loss).
        mHashMultiplier += UINT64_C(0xc4ceb9fe1a85ec54);
    }
2457 
    // Destroys all nodes and releases the table storage. Leaves members untouched;
    // callers re-init() afterwards if the object lives on.
    void destroy() {
        if (0 == mMask) {
            // don't deallocate! mKeyVals still points at mMask (the empty-map state).
            return;
        }

        // destructs all entries; skipped entirely for trivially destructible flat nodes
        Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}
            .nodesDoNotDeallocate(*this);

        // This protection against not deleting mMask shouldn't be needed as it's sufficiently
        // protected with the 0==mMask check, but I have this anyways because g++ 7 otherwise
        // reports a compile error: attempt to free a non-heap object 'fm'
        // [-Werror=free-nonheap-object]
        if (mKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) {
            ROBIN_HOOD_LOG("std::free")
            std::free(mKeyVals);
        }
    }
2476 
    // Resets all members to the empty-map state: no allocation, node/info pointers
    // alias mMask so probes terminate immediately.
    void init() noexcept {
        mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask);
        mInfo = reinterpret_cast<uint8_t*>(&mMask);
        mNumElements = 0;
        mMask = 0;
        mMaxNumElementsAllowed = 0;
        mInfoInc = InitialInfoInc;
        mInfoHashShift = InitialInfoHashShift;
    }
2486 
    // members are sorted so no padding occurs
    // NOTE: mKeyVals and mInfo alias &mMask while the map is empty (see init()).
    uint64_t mHashMultiplier = UINT64_C(0xc4ceb9fe1a85ec53);                // 8 byte  8
    Node* mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask); // 8 byte 16
    uint8_t* mInfo = reinterpret_cast<uint8_t*>(&mMask);                    // 8 byte 24
    size_t mNumElements = 0;                                                // 8 byte 32
    size_t mMask = 0;                                                       // 8 byte 40
    size_t mMaxNumElementsAllowed = 0;                                      // 8 byte 48
    InfoType mInfoInc = InitialInfoInc;                                     // 4 byte 52
    InfoType mInfoHashShift = InitialInfoHashShift;                         // 4 byte 56
                                                                            // 16 byte 56 if NodeAllocator
2496  // 16 byte 56 if NodeAllocator
2497 };
2498 
2499 } // namespace detail
2500 
2501 // map
2502 
2503 template <typename Key, typename T, typename Hash = hash<Key>,
2504  typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
2506 
2507 template <typename Key, typename T, typename Hash = hash<Key>,
2508  typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
2510 
2511 template <typename Key, typename T, typename Hash = hash<Key>,
2512  typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
2513 using unordered_map =
2514  detail::Table<sizeof(robin_hood::pair<Key, T>) <= sizeof(size_t) * 6 &&
2515  std::is_nothrow_move_constructible<robin_hood::pair<Key, T>>::value &&
2516  std::is_nothrow_move_assignable<robin_hood::pair<Key, T>>::value,
2517  MaxLoadFactor100, Key, T, Hash, KeyEqual>;
2518 
2519 // set
2520 
2521 template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
2522  size_t MaxLoadFactor100 = 80>
2524 
2525 template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
2526  size_t MaxLoadFactor100 = 80>
2528 
2529 template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
2530  size_t MaxLoadFactor100 = 80>
2531 using unordered_set = detail::Table < sizeof(Key) <= sizeof(size_t) * 6 &&
2532  std::is_nothrow_move_constructible<Key>::value &&
2533  std::is_nothrow_move_assignable<Key>::value,
2534  MaxLoadFactor100, Key, void, Hash, KeyEqual>;
2535 
2536 } // namespace robin_hood
2537 /* *INDENT-ON* */
2538 
2539 #endif // NAV2_SMAC_PLANNER__THIRDPARTY__ROBIN_HOOD_H_