Kigs Framework  Doc version 0.8
Open source multi purpose Rapid Application Development framework
robin_hood.h
1 // ______ _____ ______ _________
2 // ______________ ___ /_ ___(_)_______ ___ /_ ______ ______ ______ /
3 // __ ___/_ __ \__ __ \__ / __ __ \ __ __ \_ __ \_ __ \_ __ /
4 // _ / / /_/ /_ /_/ /_ / _ / / / _ / / // /_/ // /_/ // /_/ /
5 // /_/ \____/ /_.___/ /_/ /_/ /_/ ________/_/ /_/ \____/ \____/ \__,_/
6 // _/_____/
7 //
8 // Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20
9 // version 3.4.1
10 // https://github.com/martinus/robin-hood-hashing
11 //
12 // Licensed under the MIT License <http://opensource.org/licenses/MIT>.
13 // SPDX-License-Identifier: MIT
14 // Copyright (c) 2018-2019 Martin Ankerl <http://martin.ankerl.com>
15 //
16 // Permission is hereby granted, free of charge, to any person obtaining a copy
17 // of this software and associated documentation files (the "Software"), to deal
18 // in the Software without restriction, including without limitation the rights
19 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
20 // copies of the Software, and to permit persons to whom the Software is
21 // furnished to do so, subject to the following conditions:
22 //
23 // The above copyright notice and this permission notice shall be included in all
24 // copies or substantial portions of the Software.
25 //
26 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
31 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 // SOFTWARE.
33 
34 #ifndef ROBIN_HOOD_H_INCLUDED
35 #define ROBIN_HOOD_H_INCLUDED
36 
37 // see https://semver.org/
38 #define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes
39 #define ROBIN_HOOD_VERSION_MINOR 4 // for adding functionality in a backwards-compatible manner
40 #define ROBIN_HOOD_VERSION_PATCH 1 // for backwards-compatible bug fixes
41 
42 #include <algorithm>
43 #include <cstdlib>
44 #include <cstring>
45 #include <functional>
46 #include <stdexcept>
47 #include <string>
48 #include <type_traits>
49 #include <utility>
50 
51 // #define ROBIN_HOOD_LOG_ENABLED
52 #ifdef ROBIN_HOOD_LOG_ENABLED
53 # include <iostream>
54 # define ROBIN_HOOD_LOG(x) std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl
55 #else
56 # define ROBIN_HOOD_LOG(x)
57 #endif
58 
59 // #define ROBIN_HOOD_TRACE_ENABLED
60 #ifdef ROBIN_HOOD_TRACE_ENABLED
61 # include <iostream>
62 # define ROBIN_HOOD_TRACE(x) \
63  std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl
64 #else
65 # define ROBIN_HOOD_TRACE(x)
66 #endif
67 
68 // all non-argument macros should use this facility. See
69 // https://www.fluentcpp.com/2019/05/28/better-macros-better-flags/
70 #define ROBIN_HOOD(x) ROBIN_HOOD_PRIVATE_DEFINITION_##x()
71 
72 // mark unused members with this macro
73 #define ROBIN_HOOD_UNUSED(identifier)
74 
75 // bitness
76 #if SIZE_MAX == UINT32_MAX
77 # define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 32
78 #elif SIZE_MAX == UINT64_MAX
79 # define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 64
80 #else
81 # error Unsupported bitness
82 #endif
83 
84 // endianess
85 #ifdef _WIN32
86 # define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() 1
87 # define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() 0
88 #else
89 # define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() \
90  (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
91 # define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
92 #endif
93 
94 // inline
95 #ifdef _WIN32
96 # define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __declspec(noinline)
97 #else
98 # define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __attribute__((noinline))
99 #endif
100 
101 // exceptions
102 #if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND)
103 # define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 0
104 #else
105 # define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 1
106 #endif
107 
108 // count leading/trailing bits
109 #ifdef _WIN32
110 # if ROBIN_HOOD(BITNESS) == 32
111 # define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward
112 # else
113 # define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64
114 # endif
115 # include <intrin.h>
116 # pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD))
117 # define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) \
118  [](size_t mask) noexcept->int { \
119  unsigned long index; \
120  return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast<int>(index) \
121  : ROBIN_HOOD(BITNESS); \
122  } \
123  (x)
124 #else
125 # if ROBIN_HOOD(BITNESS) == 32
126 # define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl
127 # define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl
128 # else
129 # define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll
130 # define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll
131 # endif
132 # define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS))
133 # define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS))
134 #endif
135 
136 // fallthrough
137 #ifndef __has_cpp_attribute // For backwards compatibility
138 # define __has_cpp_attribute(x) 0
139 #endif
140 #if __has_cpp_attribute(clang::fallthrough)
141 # define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[clang::fallthrough]]
142 #elif __has_cpp_attribute(gnu::fallthrough)
143 # define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[gnu::fallthrough]]
144 #else
145 # define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH()
146 #endif
147 
148 // likely/unlikely
149 #if defined(_WIN32)
150 # define ROBIN_HOOD_LIKELY(condition) condition
151 # define ROBIN_HOOD_UNLIKELY(condition) condition
152 #else
153 # define ROBIN_HOOD_LIKELY(condition) __builtin_expect(condition, 1)
154 # define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0)
155 #endif
156 
157 // workaround missing "is_trivially_copyable" in g++ < 5.0
158 // See https://stackoverflow.com/a/31798726/48181
159 #if defined(__GNUC__) && __GNUC__ < 5
160 # define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
161 #else
162 # define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
163 #endif
164 
165 // helpers for C++ versions, see https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html
166 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX() __cplusplus
167 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX98() 199711L
168 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX11() 201103L
169 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX14() 201402L
170 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX17() 201703L
171 
172 #if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17)
173 # define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() [[nodiscard]]
174 #else
175 # define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD()
176 #endif
177 
178 namespace robin_hood {
179 
180 #if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
181 # define ROBIN_HOOD_STD std
182 #else
183 
184 // c++11 compatibility layer
185 namespace ROBIN_HOOD_STD {
186 template <class T>
187 struct alignment_of
188  : std::integral_constant<std::size_t, alignof(typename std::remove_all_extents<T>::type)> {};
189 
190 template <class T, T... Ints>
191 class integer_sequence {
192 public:
193  using value_type = T;
194  static_assert(std::is_integral<value_type>::value, "not integral type");
195  static constexpr std::size_t size() noexcept {
196  return sizeof...(Ints);
197  }
198 };
199 template <std::size_t... Inds>
200 using index_sequence = integer_sequence<std::size_t, Inds...>;
201 
202 namespace detail_ {
203 template <class T, T Begin, T End, bool>
204 struct IntSeqImpl {
205  using TValue = T;
206  static_assert(std::is_integral<TValue>::value, "not integral type");
207  static_assert(Begin >= 0 && Begin < End, "unexpected argument (Begin<0 || Begin<=End)");
208 
209  template <class, class>
210  struct IntSeqCombiner;
211 
212  template <TValue... Inds0, TValue... Inds1>
213  struct IntSeqCombiner<integer_sequence<TValue, Inds0...>, integer_sequence<TValue, Inds1...>> {
214  using TResult = integer_sequence<TValue, Inds0..., Inds1...>;
215  };
216 
217  using TResult =
218  typename IntSeqCombiner<typename IntSeqImpl<TValue, Begin, Begin + (End - Begin) / 2,
219  (End - Begin) / 2 == 1>::TResult,
220  typename IntSeqImpl<TValue, Begin + (End - Begin) / 2, End,
221  (End - Begin + 1) / 2 == 1>::TResult>::TResult;
222 };
223 
224 template <class T, T Begin>
225 struct IntSeqImpl<T, Begin, Begin, false> {
226  using TValue = T;
227  static_assert(std::is_integral<TValue>::value, "not integral type");
228  static_assert(Begin >= 0, "unexpected argument (Begin<0)");
229  using TResult = integer_sequence<TValue>;
230 };
231 
232 template <class T, T Begin, T End>
233 struct IntSeqImpl<T, Begin, End, true> {
234  using TValue = T;
235  static_assert(std::is_integral<TValue>::value, "not integral type");
236  static_assert(Begin >= 0, "unexpected argument (Begin<0)");
237  using TResult = integer_sequence<TValue, Begin>;
238 };
239 } // namespace detail_
240 
241 template <class T, T N>
242 using make_integer_sequence = typename detail_::IntSeqImpl<T, 0, N, (N - 0) == 1>::TResult;
243 
244 template <std::size_t N>
245 using make_index_sequence = make_integer_sequence<std::size_t, N>;
246 
247 template <class... T>
248 using index_sequence_for = make_index_sequence<sizeof...(T)>;
249 
250 } // namespace ROBIN_HOOD_STD
251 
252 #endif
253 
254 namespace detail {
255 
// umul
// 64x64 -> 128 bit multiply, used by hash_int as its mixing step. Three variants:
// native __int128 support, MSVC 64-bit intrinsics, or unavailable (HAS_UMUL128 == 0).
#if defined(__SIZEOF_INT128__)
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_UMUL128() 1
# if defined(__GNUC__) || defined(__clang__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wpedantic"
using uint128_t = unsigned __int128;
# pragma GCC diagnostic pop
# endif
// Multiplies a and b; stores the high 64 bits of the product in *high, returns the low 64 bits.
inline uint64_t umul128(uint64_t a, uint64_t b, uint64_t* high) noexcept {
    auto result = static_cast<uint128_t>(a) * static_cast<uint128_t>(b);
    *high = static_cast<uint64_t>(result >> 64U);
    return static_cast<uint64_t>(result);
}
#elif (defined(_WIN32) && ROBIN_HOOD(BITNESS) == 64)
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_UMUL128() 1
# include <intrin.h> // for __umulh
# pragma intrinsic(__umulh)
# ifndef _M_ARM64
# pragma intrinsic(_umul128)
# endif
inline uint64_t umul128(uint64_t a, uint64_t b, uint64_t* high) noexcept {
# ifdef _M_ARM64
    // ARM64 has no _umul128 intrinsic: compute the high and low halves separately.
    *high = __umulh(a, b);
    return ((uint64_t)(a)) * (b);
# else
    return _umul128(a, b, high);
# endif
}
#else
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_UMUL128() 0
#endif
288 
// Casts a (const) void pointer to T while avoiding "cast increases required alignment of
// target type" style warnings (e.g. uint8_t* -> uint64_t*). The caller is responsible for
// ensuring the pointer really is suitably aligned — use with care!
template <typename T>
inline T reinterpret_cast_no_cast_align_warning(void* p) noexcept {
    return reinterpret_cast<T>(p);
}

// const overload of the above.
template <typename T>
inline T reinterpret_cast_no_cast_align_warning(void const* p) noexcept {
    return reinterpret_cast<T>(p);
}
301 
// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
// inlinings more difficult. Throws are also generally the slow path.
template <typename E, typename... Args>
ROBIN_HOOD(NOINLINE)
#if ROBIN_HOOD(HAS_EXCEPTIONS)
void doThrow(Args&&... args) {
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay)
    throw E(std::forward<Args>(args)...);
}
#else
// with exceptions disabled there is nothing sensible to report back, so terminate via abort().
void doThrow(Args&&... ROBIN_HOOD_UNUSED(args) /*unused*/) {
    abort();
}
#endif
316 
317 template <typename E, typename T, typename... Args>
318 T* assertNotNull(T* t, Args&&... args) {
319  if (ROBIN_HOOD_UNLIKELY(nullptr == t)) {
320  doThrow<E>(std::forward<Args>(args)...);
321  }
322  return t;
323 }
324 
// Reads a T from `src` without assuming any particular alignment. Going through memcpy is
// the portable way to do an unaligned load; compilers optimize it into a plain load.
template <typename T>
inline T unaligned_load(void const* src) noexcept {
    T value;
    std::memcpy(&value, src, sizeof(T));
    return value;
}
333 
// Allocates bulks of memory for objects of type T. This deallocates the memory in the destructor,
// and keeps a linked list of the allocated memory around. Overhead per allocation is the size of a
// pointer.
template <typename T, size_t MinNumAllocs = 4, size_t MaxNumAllocs = 256>
class BulkPoolAllocator {
public:
    BulkPoolAllocator() noexcept = default;

    // does not copy anything, just creates a new allocator.
    BulkPoolAllocator(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept
        : mHead(nullptr)
        , mListForFree(nullptr) {}

    // move: steal the other allocator's free list and block list.
    BulkPoolAllocator(BulkPoolAllocator&& o) noexcept
        : mHead(o.mHead)
        , mListForFree(o.mListForFree) {
        o.mListForFree = nullptr;
        o.mHead = nullptr;
    }

    BulkPoolAllocator& operator=(BulkPoolAllocator&& o) noexcept {
        reset();
        mHead = o.mHead;
        mListForFree = o.mListForFree;
        o.mListForFree = nullptr;
        o.mHead = nullptr;
        return *this;
    }

    BulkPoolAllocator&
    operator=(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept {
        // does not do anything
        return *this;
    }

    ~BulkPoolAllocator() noexcept {
        reset();
    }

    // Deallocates all allocated memory.
    void reset() noexcept {
        // walk the list of malloc'ed blocks; the first pointer-sized slot of each block
        // stores the pointer to the next block.
        while (mListForFree) {
            T* tmp = *mListForFree;
            free(mListForFree);
            mListForFree = reinterpret_cast_no_cast_align_warning<T**>(tmp);
        }
        mHead = nullptr;
    }

    // allocates, but does NOT initialize. Use in-place new constructor, e.g.
    // T* mObj = pool.allocate();
    // ::new (static_cast<void*>(mObj)) T();
    T* allocate() {
        T* tmp = mHead;
        if (!tmp) {
            tmp = performAllocation();
        }

        // pop the head of the free list; each free slot stores the pointer to the next slot.
        mHead = *reinterpret_cast_no_cast_align_warning<T**>(tmp);
        return tmp;
    }

    // does not actually deallocate but puts it in store.
    // make sure you have already called the destructor! e.g. with
    // mObj->~T();
    // pool.deallocate(mObj);
    void deallocate(T* obj) noexcept {
        // push onto the free list, reusing the dead object's storage for the next-pointer.
        *reinterpret_cast_no_cast_align_warning<T**>(obj) = mHead;
        mHead = obj;
    }

    // Adds an already allocated block of memory to the allocator. This allocator is from now on
    // responsible for freeing the data (with free()). If the provided data is not large enough to
    // make use of, it is immediately freed. Otherwise it is reused and freed in the destructor.
    void addOrFree(void* ptr, const size_t numBytes) noexcept {
        // calculate number of available elements in ptr
        if (numBytes < ALIGNMENT + ALIGNED_SIZE) {
            // not enough data for at least one element. Free and return.
            free(ptr);
        } else {
            add(ptr, numBytes);
        }
    }

    void swap(BulkPoolAllocator<T, MinNumAllocs, MaxNumAllocs>& other) noexcept {
        using std::swap;
        swap(mHead, other.mHead);
        swap(mListForFree, other.mListForFree);
    }

private:
    // iterates the list of allocated memory to calculate how many to alloc next.
    // Recalculating this each time saves us a size_t member.
    // This ignores the fact that memory blocks might have been added manually with addOrFree. In
    // practice, this should not matter much.
    ROBIN_HOOD(NODISCARD) size_t calcNumElementsToAlloc() const noexcept {
        auto tmp = mListForFree;
        size_t numAllocs = MinNumAllocs;

        // double the request for each block already allocated, capped at MaxNumAllocs.
        while (numAllocs * 2 <= MaxNumAllocs && tmp) {
            auto x = reinterpret_cast<T***>(tmp);
            tmp = *x;
            numAllocs *= 2;
        }

        return numAllocs;
    }

    // WARNING: Underflow if numBytes < ALIGNMENT! This is guarded in addOrFree().
    void add(void* ptr, const size_t numBytes) noexcept {
        const size_t numElements = (numBytes - ALIGNMENT) / ALIGNED_SIZE;

        auto data = reinterpret_cast<T**>(ptr);

        // link free list
        auto x = reinterpret_cast<T***>(data);
        *x = mListForFree;
        mListForFree = data;

        // create linked list for newly allocated data
        // the first ALIGNMENT bytes hold the block-list pointer; elements start after it.
        auto const headT =
            reinterpret_cast_no_cast_align_warning<T*>(reinterpret_cast<char*>(ptr) + ALIGNMENT);

        auto const head = reinterpret_cast<char*>(headT);

        // Visual Studio compiler automatically unrolls this loop, which is pretty cool
        for (size_t i = 0; i < numElements; ++i) {
            *reinterpret_cast_no_cast_align_warning<char**>(head + i * ALIGNED_SIZE) =
                head + (i + 1) * ALIGNED_SIZE;
        }

        // last one points to 0
        *reinterpret_cast_no_cast_align_warning<T**>(head + (numElements - 1) * ALIGNED_SIZE) =
            mHead;
        mHead = headT;
    }

    // Called when no memory is available (mHead == 0).
    // Don't inline this slow path.
    ROBIN_HOOD(NOINLINE) T* performAllocation() {
        size_t const numElementsToAlloc = calcNumElementsToAlloc();

        // alloc new memory: [prev |T, T, ... T]
        // std::cout << (sizeof(T*) + ALIGNED_SIZE * numElementsToAlloc) << " bytes" << std::endl;
        size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc;
        add(assertNotNull<std::bad_alloc>(malloc(bytes)), bytes);
        return mHead;
    }

    // enforce byte alignment of the T's
#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
    static constexpr size_t ALIGNMENT =
        (std::max)(std::alignment_of<T>::value, std::alignment_of<T*>::value);
#else
    static const size_t ALIGNMENT =
        (ROBIN_HOOD_STD::alignment_of<T>::value > ROBIN_HOOD_STD::alignment_of<T*>::value)
            ? ROBIN_HOOD_STD::alignment_of<T>::value
            : +ROBIN_HOOD_STD::alignment_of<T*>::value; // the + is a workaround
                                                        // NOTE(review): presumably silences a
                                                        // compiler quirk here — confirm
#endif

    static constexpr size_t ALIGNED_SIZE = ((sizeof(T) - 1) / ALIGNMENT + 1) * ALIGNMENT;

    static_assert(MinNumAllocs >= 1, "MinNumAllocs");
    static_assert(MaxNumAllocs >= MinNumAllocs, "MaxNumAllocs");
    static_assert(ALIGNED_SIZE >= sizeof(T*), "ALIGNED_SIZE");
    static_assert(0 == (ALIGNED_SIZE % sizeof(T*)), "ALIGNED_SIZE mod");
    static_assert(ALIGNMENT >= sizeof(T*), "ALIGNMENT");

    T* mHead{nullptr};         // head of the free list of individual object slots
    T** mListForFree{nullptr}; // head of the list of malloc'ed blocks, freed in reset()
};
505 
// Allocator policy for the map's nodes, selected by the IsFlatMap template flag.
template <typename T, size_t MinSize, size_t MaxSize, bool IsFlatMap>
struct NodeAllocator;

// dummy allocator that does nothing (flat map keeps its data inline, so handed-over
// memory is never reused)
template <typename T, size_t MinSize, size_t MaxSize>
struct NodeAllocator<T, MinSize, MaxSize, true> {

    // we are not using the data, so just free it.
    void addOrFree(void* ptr, size_t ROBIN_HOOD_UNUSED(numBytes) /*unused*/) noexcept {
        free(ptr);
    }
};

// node-based map: pool-allocate the nodes via BulkPoolAllocator.
template <typename T, size_t MinSize, size_t MaxSize>
struct NodeAllocator<T, MinSize, MaxSize, false> : public BulkPoolAllocator<T, MinSize, MaxSize> {};
521 
// dummy hash, used as a mixer when robin_hood::hash is already used: simply returns the
// value itself, cast to size_t.
template <typename T>
struct identity_hash {
    constexpr size_t operator()(T const& value) const noexcept {
        return static_cast<size_t>(value);
    }
};
529 
// c++14 doesn't have is_nothrow_swappable, and clang++ 6.0.1 doesn't like it either, so I'm making
// my own here.
namespace swappable {
// Pull std::swap into scope so the noexcept() probe below considers both std::swap and any
// ADL-found swap overload — mirroring how "using std::swap; swap(a, b);" call sites behave.
using std::swap;
template <typename T>
struct nothrow {
    // true when swapping two T lvalues cannot throw.
    static const bool value = noexcept(swap(std::declval<T&>(), std::declval<T&>()));
};

} // namespace swappable
540 
541 } // namespace detail
542 
// Empty tag type. NOTE(review): presumably used to select heterogeneous ("transparent")
// lookup overloads elsewhere in this header — confirm at the call sites.
struct is_transparent_tag {};
544 
// A custom pair implementation is used in the map because std::pair is not is_trivially_copyable,
// which means it would not be allowed to be used in std::memcpy. This struct is copyable, which is
// also tested.
template <typename T1, typename T2>
struct pair {
    using first_type = T1;
    using second_type = T2;

    // default-constructs both members; only participates in overload resolution when both
    // types are default constructible.
    template <typename U1 = T1, typename U2 = T2,
              typename = typename std::enable_if<std::is_default_constructible<U1>::value &&
                                                 std::is_default_constructible<U2>::value>::type>
    constexpr pair() noexcept(noexcept(U1()) && noexcept(U2()))
        : first()
        , second() {}

    // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
    explicit constexpr pair(std::pair<T1, T2> const& o) noexcept(
        noexcept(T1(std::declval<T1 const&>())) && noexcept(T2(std::declval<T2 const&>())))
        : first(o.first)
        , second(o.second) {}

    // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
    explicit constexpr pair(std::pair<T1, T2>&& o) noexcept(
        noexcept(T1(std::move(std::declval<T1&&>()))) &&
        noexcept(T2(std::move(std::declval<T2&&>()))))
        : first(std::move(o.first))
        , second(std::move(o.second)) {}

    // moves both arguments into the members.
    constexpr pair(T1&& a, T2&& b) noexcept(noexcept(T1(std::move(std::declval<T1&&>()))) &&
                                            noexcept(T2(std::move(std::declval<T2&&>()))))
        : first(std::move(a))
        , second(std::move(b)) {}

    // perfect-forwarding constructor for arguments convertible to T1/T2.
    template <typename U1, typename U2>
    constexpr pair(U1&& a, U2&& b) noexcept(noexcept(T1(std::forward<U1>(std::declval<U1&&>()))) &&
                                            noexcept(T2(std::forward<U2>(std::declval<U2&&>()))))
        : first(std::forward<U1>(a))
        , second(std::forward<U2>(b)) {}

    // piecewise construction: forwards each tuple's elements to the corresponding member's
    // constructor by delegating to the index_sequence constructor below.
    template <typename... U1, typename... U2>
    constexpr pair(
        std::piecewise_construct_t /*unused*/, std::tuple<U1...> a,
        std::tuple<U2...> b) noexcept(noexcept(pair(std::declval<std::tuple<U1...>&>(),
                                                    std::declval<std::tuple<U2...>&>(),
                                                    ROBIN_HOOD_STD::index_sequence_for<U1...>(),
                                                    ROBIN_HOOD_STD::index_sequence_for<U2...>())))
        : pair(a, b, ROBIN_HOOD_STD::index_sequence_for<U1...>(),
               ROBIN_HOOD_STD::index_sequence_for<U2...>()) {}

    // constructor called from the std::piecewise_construct_t ctor
    template <typename... U1, size_t... I1, typename... U2, size_t... I2>
    pair(std::tuple<U1...>& a, std::tuple<U2...>& b,
         ROBIN_HOOD_STD::index_sequence<I1...> /*unused*/,
         ROBIN_HOOD_STD::index_sequence<I2...> /*unused*/)
        noexcept(noexcept(T1(std::forward<U1>(
                     std::get<I1>(std::declval<std::tuple<U1...>&>()))...)) &&
                 noexcept(T2(std::forward<U2>(
                     std::get<I2>(std::declval<std::tuple<U2...>&>()))...)))
        : first(std::forward<U1>(std::get<I1>(a))...)
        , second(std::forward<U2>(std::get<I2>(b))...) {
        // make visual studio compiler happy about warning about unused a & b.
        // Visual studio's pair implementation disables warning 4100.
        (void)a;
        (void)b;
    }

    ROBIN_HOOD(NODISCARD) first_type& getFirst() noexcept {
        return first;
    }
    ROBIN_HOOD(NODISCARD) first_type const& getFirst() const noexcept {
        return first;
    }
    ROBIN_HOOD(NODISCARD) second_type& getSecond() noexcept {
        return second;
    }
    ROBIN_HOOD(NODISCARD) second_type const& getSecond() const noexcept {
        return second;
    }

    // member-wise swap; uses ADL swap for each member.
    void swap(pair<T1, T2>& o) noexcept((detail::swappable::nothrow<T1>::value) &&
                                        (detail::swappable::nothrow<T2>::value)) {
        using std::swap;
        swap(first, o.first);
        swap(second, o.second);
    }

    T1 first;  // NOLINT(misc-non-private-member-variables-in-classes)
    T2 second; // NOLINT(misc-non-private-member-variables-in-classes)
};
636 
// Free-function swap for robin_hood::pair, forwarding to the member swap.
template <typename A, typename B>
void swap(pair<A, B>& a, pair<A, B>& b) noexcept(
    noexcept(std::declval<pair<A, B>&>().swap(std::declval<pair<A, B>&>()))) {
    a.swap(b);
}
642 
643 // Hash an arbitrary amount of bytes. This is basically Murmur2 hash without caring about big
644 // endianness. TODO(martinus) add a fallback for very large strings?
645 static size_t hash_bytes(void const* ptr, size_t const len) noexcept {
646  static constexpr uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
647  static constexpr uint64_t seed = UINT64_C(0xe17a1465);
648  static constexpr unsigned int r = 47;
649 
650  auto const data64 = static_cast<uint64_t const*>(ptr);
651  uint64_t h = seed ^ (len * m);
652 
653  size_t const n_blocks = len / 8;
654  for (size_t i = 0; i < n_blocks; ++i) {
655  auto k = detail::unaligned_load<uint64_t>(data64 + i);
656 
657  k *= m;
658  k ^= k >> r;
659  k *= m;
660 
661  h ^= k;
662  h *= m;
663  }
664 
665  auto const data8 = reinterpret_cast<uint8_t const*>(data64 + n_blocks);
666  switch (len & 7U) {
667  case 7:
668  h ^= static_cast<uint64_t>(data8[6]) << 48U;
669  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
670  case 6:
671  h ^= static_cast<uint64_t>(data8[5]) << 40U;
672  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
673  case 5:
674  h ^= static_cast<uint64_t>(data8[4]) << 32U;
675  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
676  case 4:
677  h ^= static_cast<uint64_t>(data8[3]) << 24U;
678  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
679  case 3:
680  h ^= static_cast<uint64_t>(data8[2]) << 16U;
681  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
682  case 2:
683  h ^= static_cast<uint64_t>(data8[1]) << 8U;
684  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
685  case 1:
686  h ^= static_cast<uint64_t>(data8[0]);
687  h *= m;
688  ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
689  default:
690  break;
691  }
692 
693  h ^= h >> r;
694  h *= m;
695  h ^= h >> r;
696  return static_cast<size_t>(h);
697 }
698 
// Mixes a 64 bit value into a well-distributed size_t; used by all the integer/pointer hashes.
inline size_t hash_int(uint64_t obj) noexcept {
#if ROBIN_HOOD(HAS_UMUL128)
    // 167079903232 masksum, 120428523 ops best: 0xde5fb9d2630458e9
    // multiply by a constant, then fold high and low halves of the 128 bit product.
    static constexpr uint64_t k = UINT64_C(0xde5fb9d2630458e9);
    uint64_t h;
    uint64_t l = detail::umul128(obj, k, &h);
    return h + l;
#elif ROBIN_HOOD(BITNESS) == 32
    // 32 bit target without 128 bit multiply: fold the 64 bit product down to 32 bits.
    uint64_t const r = obj * UINT64_C(0xca4bcaa75ec3f625);
    auto h = static_cast<uint32_t>(r >> 32U);
    auto l = static_cast<uint32_t>(r);
    return h + l;
#else
    // murmurhash 3 finalizer
    uint64_t h = obj;
    h ^= h >> 33;
    h *= 0xff51afd7ed558ccd;
    h ^= h >> 33;
    h *= 0xc4ceb9fe1a85ec53;
    h ^= h >> 33;
    return static_cast<size_t>(h);
#endif
}
722 
// A thin wrapper around std::hash, performing an additional simple mixing step of the result.
template <typename T>
struct hash : public std::hash<T> {
    size_t operator()(T const& obj) const
        noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>()))) {
        // call base hash
        auto result = std::hash<T>::operator()(obj);
        // mix that result, to be safe against std::hash implementations that are the identity
        return hash_int(static_cast<uint64_t>(result));
    }
};
734 
735 template <>
736 struct hash<std::string> {
737  size_t operator()(std::string const& str) const noexcept {
738  return hash_bytes(str.data(), str.size());
739  }
740 };
741 
742 template <class T>
743 struct hash<T*> {
744  size_t operator()(T* ptr) const noexcept {
745  return hash_int(reinterpret_cast<size_t>(ptr));
746  }
747 };
748 
// Helper macro: defines a robin_hood::hash specialization for an integral type T by
// widening the value to uint64_t and mixing it with hash_int.
#define ROBIN_HOOD_HASH_INT(T) \
    template <> \
    struct hash<T> { \
        size_t operator()(T obj) const noexcept { \
            return hash_int(static_cast<uint64_t>(obj)); \
        } \
    }

#if defined(__GNUC__) && !defined(__clang__)
// the static_cast in the macro is redundant for 64 bit types; silence gcc's -Wuseless-cast.
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wuseless-cast"
#endif
// see https://en.cppreference.com/w/cpp/utility/hash
ROBIN_HOOD_HASH_INT(bool);
ROBIN_HOOD_HASH_INT(char);
ROBIN_HOOD_HASH_INT(signed char);
ROBIN_HOOD_HASH_INT(unsigned char);
ROBIN_HOOD_HASH_INT(char16_t);
ROBIN_HOOD_HASH_INT(char32_t);
ROBIN_HOOD_HASH_INT(wchar_t);
ROBIN_HOOD_HASH_INT(short);
ROBIN_HOOD_HASH_INT(unsigned short);
ROBIN_HOOD_HASH_INT(int);
ROBIN_HOOD_HASH_INT(unsigned int);
ROBIN_HOOD_HASH_INT(long);
ROBIN_HOOD_HASH_INT(long long);
ROBIN_HOOD_HASH_INT(unsigned long);
ROBIN_HOOD_HASH_INT(unsigned long long);
#if defined(__GNUC__) && !defined(__clang__)
# pragma GCC diagnostic pop
#endif
780 namespace detail {
781 
// using wrapper classes for hash and key_equal prevents the diamond problem when the same type is
// used. see https://stackoverflow.com/a/28771920/48181
// (unordered_map inherits from both; wrapping gives each base a distinct type even when the
// user passes the same class for Hash and KeyEqual.)
template <typename T>
struct WrapHash : public T {
    WrapHash() = default;
    explicit WrapHash(T const& o) noexcept(noexcept(T(std::declval<T const&>())))
        : T(o) {}
};

template <typename T>
struct WrapKeyEqual : public T {
    WrapKeyEqual() = default;
    explicit WrapKeyEqual(T const& o) noexcept(noexcept(T(std::declval<T const&>())))
        : T(o) {}
};
797 
798 // A highly optimized hashmap implementation, using the Robin Hood algorithm.
799 //
800 // In most cases, this map should be usable as a drop-in replacement for std::unordered_map, but be
801 // about 2x faster in most cases and require much less allocations.
802 //
803 // This implementation uses the following memory layout:
804 //
805 // [Node, Node, ... Node | info, info, ... infoSentinel ]
806 //
// * Node: either a DataNode that directly has the std::pair<key, val> as member,
// or a DataNode with a pointer to std::pair<key,val>. Which DataNode representation to use
// depends on how fast the swap() operation is. Heuristically, this is automatically chosen based
// on sizeof(). There are always 2^n Nodes.
811 //
812 // * info: Each Node in the map has a corresponding info byte, so there are 2^n info bytes.
813 // Each byte is initialized to 0, meaning the corresponding Node is empty. Set to 1 means the
814 // corresponding node contains data. Set to 2 means the corresponding Node is filled, but it
815 // actually belongs to the previous position and was pushed out because that place is already
816 // taken.
817 //
// * infoSentinel: Sentinel byte set to 1, so that iterator's ++ can stop at end() without the
// need for an idx variable.
821 //
822 // According to STL, order of templates has effect on throughput. That's why I've moved the boolean
823 // to the front.
824 // https://www.reddit.com/r/cpp/comments/ahp6iu/compile_time_binary_size_reductions_and_cs_future/eeguck4/
template <bool IsFlatMap, size_t MaxLoadFactor100, typename Key, typename T, typename Hash,
          typename KeyEqual>
class unordered_map
    : public WrapHash<Hash>,
      public WrapKeyEqual<KeyEqual>,
      detail::NodeAllocator<
          robin_hood::pair<typename std::conditional<IsFlatMap, Key, Key const>::type, T>, 4, 16384,
          IsFlatMap> {
public:
    using key_type = Key;
    using mapped_type = T;
    // For the flat map the key is stored non-const so whole nodes can be moved around inside the
    // open-addressing array; for the node-based map the key is Key const as usual.
    using value_type =
        robin_hood::pair<typename std::conditional<IsFlatMap, Key, Key const>::type, T>;
    using size_type = size_t;
    using hasher = Hash;
    using key_equal = KeyEqual;
    using Self =
        unordered_map<IsFlatMap, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
    static constexpr bool is_flat_map = IsFlatMap;

private:
    static_assert(MaxLoadFactor100 > 10 && MaxLoadFactor100 < 100,
                  "MaxLoadFactor100 needs to be >10 && < 100");

    using WHash = WrapHash<Hash>;
    using WKeyEqual = WrapKeyEqual<KeyEqual>;

    // configuration defaults

    // make sure we have 8 elements, needed to quickly rehash mInfo
    static constexpr size_t InitialNumElements = sizeof(uint64_t);
    static constexpr uint32_t InitialInfoNumBits = 5;
    static constexpr uint8_t InitialInfoInc = 1U << InitialInfoNumBits;
    static constexpr uint8_t InitialInfoHashShift = sizeof(size_t) * 8 - InitialInfoNumBits;
    using DataPool = detail::NodeAllocator<value_type, 4, 16384, IsFlatMap>;

    // type for the info accumulator; needs to be wider than uint8_t so overflow past 0xFF can be
    // detected before truncating back into the mInfo byte array.
    using InfoType = uint32_t;

    // DataNode ////////////////////////////////////////////////////////

    // Primary template for the data node. We have special implementations for small and big
    // objects. For large objects it is assumed that swap() is fairly slow, so we allocate these on
    // the heap so swap merely swaps a pointer.
    template <typename M, bool>
    class DataNode {};
871 
    // Small object: the key/value pair lives directly inside the node (i.e. inside the map's
    // array), so no heap allocation is needed.
    template <typename M>
    class DataNode<M, true> final {
    public:
        // Forwards args to the pair's constructor; the map reference is unused here.
        template <typename... Args>
        explicit DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, Args&&... args) noexcept(
            noexcept(value_type(std::forward<Args>(args)...)))
            : mData(std::forward<Args>(args)...) {}

        DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, true>&& n) noexcept(
            std::is_nothrow_move_constructible<value_type>::value)
            : mData(std::move(n.mData)) {}

        // doesn't do anything: the pair is destroyed together with the node itself.
        void destroy(M& ROBIN_HOOD_UNUSED(map) /*unused*/) noexcept {}
        void destroyDoNotDeallocate() noexcept {}

        value_type const* operator->() const noexcept {
            return &mData;
        }
        value_type* operator->() noexcept {
            return &mData;
        }

        const value_type& operator*() const noexcept {
            return mData;
        }

        value_type& operator*() noexcept {
            return mData;
        }

        // Accessors for the key (first) and the mapped value (second).
        ROBIN_HOOD(NODISCARD) typename value_type::first_type& getFirst() noexcept {
            return mData.first;
        }

        ROBIN_HOOD(NODISCARD) typename value_type::first_type const& getFirst() const noexcept {
            return mData.first;
        }

        ROBIN_HOOD(NODISCARD) typename value_type::second_type& getSecond() noexcept {
            return mData.second;
        }

        ROBIN_HOOD(NODISCARD) typename value_type::second_type const& getSecond() const noexcept {
            return mData.second;
        }

        void swap(DataNode<M, true>& o) noexcept(
            noexcept(std::declval<value_type>().swap(std::declval<value_type>()))) {
            mData.swap(o.mData);
        }

    private:
        value_type mData;
    };
928 
    // Big object: the key/value pair is heap-allocated through the map's data pool, so moving or
    // swapping a node only exchanges a single pointer.
    template <typename M>
    class DataNode<M, false> {
    public:
        // Allocates storage from the map's pool and placement-constructs the pair in it.
        template <typename... Args>
        explicit DataNode(M& map, Args&&... args)
            : mData(map.allocate()) {
            ::new (static_cast<void*>(mData)) value_type(std::forward<Args>(args)...);
        }

        DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, false>&& n) noexcept
            : mData(std::move(n.mData)) {}

        void destroy(M& map) noexcept {
            // don't deallocate, just put it into list of datapool.
            mData->~value_type();
            map.deallocate(mData);
        }

        // Destroys the pair but keeps the heap block (used when the whole pool is released
        // wholesale afterwards).
        void destroyDoNotDeallocate() noexcept {
            mData->~value_type();
        }

        value_type const* operator->() const noexcept {
            return mData;
        }

        value_type* operator->() noexcept {
            return mData;
        }

        const value_type& operator*() const {
            return *mData;
        }

        value_type& operator*() {
            return *mData;
        }

        // Accessors for the key (first) and the mapped value (second).
        ROBIN_HOOD(NODISCARD) typename value_type::first_type& getFirst() {
            return mData->first;
        }

        ROBIN_HOOD(NODISCARD) typename value_type::first_type const& getFirst() const {
            return mData->first;
        }

        ROBIN_HOOD(NODISCARD) typename value_type::second_type& getSecond() {
            return mData->second;
        }

        ROBIN_HOOD(NODISCARD) typename value_type::second_type const& getSecond() const {
            return mData->second;
        }

        void swap(DataNode<M, false>& o) noexcept {
            using std::swap;
            swap(mData, o.mData);
        }

    private:
        // owning pointer into the map's data pool
        value_type* mData;
    };
992 
    // The node type actually stored: in-place for the flat map, pool-allocated otherwise.
    using Node = DataNode<Self, IsFlatMap>;

    // Cloner //////////////////////////////////////////////////////////

    // Copies all buckets (nodes + info bytes) from one map into an equally-sized one.
    template <typename M, bool UseMemcpy>
    struct Cloner;

    // fast path: Just copy data, without allocating anything.
    template <typename M>
    struct Cloner<M, true> {
        void operator()(M const& source, M& target) const {
            // std::memcpy(target.mKeyVals, source.mKeyVals,
            //             target.calcNumBytesTotal(target.mMask + 1));
            // One bulk copy covers the node array and the trailing info array (incl. sentinel).
            auto src = reinterpret_cast<char const*>(source.mKeyVals);
            auto tgt = reinterpret_cast<char*>(target.mKeyVals);
            std::copy(src, src + target.calcNumBytesTotal(target.mMask + 1), tgt);
        }
    };

    // slow path: copy the info bytes, then copy-construct every occupied node individually.
    template <typename M>
    struct Cloner<M, false> {
        void operator()(M const& s, M& t) const {
            std::copy(s.mInfo, s.mInfo + t.calcNumBytesInfo(t.mMask + 1), t.mInfo);

            for (size_t i = 0; i < t.mMask + 1; ++i) {
                if (t.mInfo[i]) {
                    // nonzero info byte means the bucket is occupied
                    ::new (static_cast<void*>(t.mKeyVals + i)) Node(t, *s.mKeyVals[i]);
                }
            }
        }
    };
1024 
    // Destroyer ///////////////////////////////////////////////////////

    // Destroys all nodes of a map. The <M, true> specialization is chosen when the flat map's
    // nodes are trivially destructible, so only the element count needs resetting.
    template <typename M, bool IsFlatMapAndTrivial>
    struct Destroyer {};

    template <typename M>
    struct Destroyer<M, true> {
        void nodes(M& m) const noexcept {
            m.mNumElements = 0;
        }

        void nodesDoNotDeallocate(M& m) const noexcept {
            m.mNumElements = 0;
        }
    };

    template <typename M>
    struct Destroyer<M, false> {
        // Destroys every occupied node and hands pool storage back.
        void nodes(M& m) const noexcept {
            m.mNumElements = 0;
            // clear also resets mInfo to 0, that's sometimes not necessary.
            for (size_t idx = 0; idx <= m.mMask; ++idx) {
                if (0 != m.mInfo[idx]) {
                    Node& n = m.mKeyVals[idx];
                    n.destroy(m);
                    n.~Node();
                }
            }
        }

        // Same, but does not return pool storage (see DataNode::destroyDoNotDeallocate).
        void nodesDoNotDeallocate(M& m) const noexcept {
            m.mNumElements = 0;
            // clear also resets mInfo to 0, that's sometimes not necessary.
            for (size_t idx = 0; idx <= m.mMask; ++idx) {
                if (0 != m.mInfo[idx]) {
                    Node& n = m.mKeyVals[idx];
                    n.destroyDoNotDeallocate();
                    n.~Node();
                }
            }
        }
    };
1067 
    // Iter ////////////////////////////////////////////////////////////

    // Tag type selecting the constructor that skips ahead to the first occupied bucket.
    struct fast_forward_tag {};

    // generic iterator for both const_iterator and iterator.
    template <bool IsConst>
    // NOLINTNEXTLINE(hicpp-special-member-functions,cppcoreguidelines-special-member-functions)
    class Iter {
    private:
        using NodePtr = typename std::conditional<IsConst, Node const*, Node*>::type;

    public:
        using difference_type = std::ptrdiff_t;
        using value_type = typename Self::value_type;
        using reference = typename std::conditional<IsConst, value_type const&, value_type&>::type;
        using pointer = typename std::conditional<IsConst, value_type const*, value_type*>::type;
        using iterator_category = std::forward_iterator_tag;

        // default constructed iterator can be compared to itself, but WON'T return true when
        // compared to end().
        Iter() = default;

        // Rule of zero: nothing specified. The conversion constructor is only enabled for iterator
        // to const_iterator, so it doesn't accidentally work as a copy ctor.

        // Conversion constructor from iterator to const_iterator.
        template <bool OtherIsConst,
                  typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
        // NOLINTNEXTLINE(hicpp-explicit-conversions)
        Iter(Iter<OtherIsConst> const& other) noexcept
            : mKeyVals(other.mKeyVals)
            , mInfo(other.mInfo) {}

        Iter(NodePtr valPtr, uint8_t const* infoPtr) noexcept
            : mKeyVals(valPtr)
            , mInfo(infoPtr) {}

        // Like the two-pointer ctor, but additionally fast-forwards to the first occupied
        // bucket (used by begin()/cbegin()).
        Iter(NodePtr valPtr, uint8_t const* infoPtr,
             fast_forward_tag ROBIN_HOOD_UNUSED(tag) /*unused*/) noexcept
            : mKeyVals(valPtr)
            , mInfo(infoPtr) {
            fastForward();
        }

        // Assignment from iterator to const_iterator (mirrors the conversion ctor above).
        template <bool OtherIsConst,
                  typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
        Iter& operator=(Iter<OtherIsConst> const& other) noexcept {
            mKeyVals = other.mKeyVals;
            mInfo = other.mInfo;
            return *this;
        }

        // prefix increment. Undefined behavior if we are at end()!
        Iter& operator++() noexcept {
            mInfo++;
            mKeyVals++;
            fastForward();
            return *this;
        }

        reference operator*() const {
            return **mKeyVals;
        }

        pointer operator->() const {
            return &**mKeyVals;
        }

        // Iterators compare by node pointer only; the info pointer is redundant for equality,
        // which is why end() can pass nullptr for it.
        template <bool O>
        bool operator==(Iter<O> const& o) const noexcept {
            return mKeyVals == o.mKeyVals;
        }

        template <bool O>
        bool operator!=(Iter<O> const& o) const noexcept {
            return mKeyVals != o.mKeyVals;
        }

    private:
        // fast forward to the next non-free info byte: loads a whole machine word of info bytes
        // at once and uses count-trailing/leading-zeroes to locate the first nonzero byte. The
        // sentinel info byte (always 1, set in init_data) guarantees termination at end().
        void fastForward() noexcept {
            int inc;
            do {
                auto const n = detail::unaligned_load<size_t>(mInfo);
#if ROBIN_HOOD(LITTLE_ENDIAN)
                inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8;
#else
                inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8;
#endif
                mInfo += inc;
                mKeyVals += inc;
            } while (inc == static_cast<int>(sizeof(size_t)));
        }

        friend class unordered_map<IsFlatMap, MaxLoadFactor100, key_type, mapped_type, hasher,
                                   key_equal>;
        NodePtr mKeyVals{nullptr};
        uint8_t const* mInfo{nullptr};
    };
1167 
1169 
    // highly performance relevant code.
    // Lower bits are used for indexing into the array (2^n size)
    // The upper 1-5 bits need to be a reasonable good hash, to save comparisons.
    template <typename HashKey>
    void keyToIdx(HashKey&& key, size_t* idx, InfoType* info) const {
        // for a user-specified hash that is *not* robin_hood::hash, apply robin_hood::hash as an
        // additional mixing step. This serves as a bad hash prevention, if the given data is badly
        // mixed.
        using Mix =
            typename std::conditional<std::is_same<::robin_hood::hash<key_type>, hasher>::value,
                                      ::robin_hood::detail::identity_hash<size_t>,
                                      ::robin_hood::hash<size_t>>::type;
        *idx = Mix{}(WHash::operator()(key));

        // info = base increment plus a few top hash bits; idx = low hash bits masked to the
        // (power-of-two) table size.
        *info = mInfoInc + static_cast<InfoType>(*idx >> mInfoHashShift);
        *idx &= mMask;
    }
1187 
1188  // forwards the index by one, wrapping around at the end
1189  void next(InfoType* info, size_t* idx) const noexcept {
1190  *idx = (*idx + 1) & mMask;
1191  *info += mInfoInc;
1192  }
1193 
1194  void nextWhileLess(InfoType* info, size_t* idx) const noexcept {
1195  // unrolling this by hand did not bring any speedups.
1196  while (*info < mInfo[*idx]) {
1197  next(info, idx);
1198  }
1199  }
1200 
    // Shift everything up by one element. Tries to move stuff around.
    // mInfo[idx] true  => the entry under idx is a constructed object (move-assign into it).
    // mInfo[idx] false => the entry under idx is unconstructed memory (placement-new into it).
    void
    shiftUp(size_t idx,
            size_t const insertion_idx) noexcept(std::is_nothrow_move_assignable<Node>::value) {
        while (idx != insertion_idx) {
            size_t prev_idx = (idx - 1) & mMask;
            if (mInfo[idx]) {
                mKeyVals[idx] = std::move(mKeyVals[prev_idx]);
            } else {
                ::new (static_cast<void*>(mKeyVals + idx)) Node(std::move(mKeyVals[prev_idx]));
            }
            // each shifted element is now one step further from home, so its info grows
            mInfo[idx] = static_cast<uint8_t>(mInfo[prev_idx] + mInfoInc);
            if (ROBIN_HOOD_UNLIKELY(mInfo[idx] + mInfoInc > 0xFF)) {
                // info byte would overflow on the next push: force a rehash on the next insert
                mMaxNumElementsAllowed = 0;
            }
            idx = prev_idx;
        }
    }

    // Removes the element at idx and backward-shifts subsequent displaced elements, so no
    // tombstones are needed.
    void shiftDown(size_t idx) noexcept(std::is_nothrow_move_assignable<Node>::value) {
        // until we find one that is either empty or has zero offset.
        // TODO(martinus) we don't need to move everything, just the last one for the same bucket.
        mKeyVals[idx].destroy(*this);

        // until we find one that is either empty or has zero offset.
        size_t nextIdx = (idx + 1) & mMask;
        while (mInfo[nextIdx] >= 2 * mInfoInc) {
            // >= 2 * mInfoInc means: occupied AND displaced from its home bucket
            mInfo[idx] = static_cast<uint8_t>(mInfo[nextIdx] - mInfoInc);
            mKeyVals[idx] = std::move(mKeyVals[nextIdx]);
            idx = nextIdx;
            nextIdx = (idx + 1) & mMask;
        }

        mInfo[idx] = 0;
        // don't destroy, we've moved it
        // mKeyVals[idx].destroy(*this);
        mKeyVals[idx].~Node();
    }
1241 
    // Returns the index of key, or the sentinel position (mMask + 1, resp. 0 for the
    // never-allocated empty map) when the key is not present. Shared by find()/at()/count().
    template <typename Other>
    ROBIN_HOOD(NODISCARD)
    size_t findIdx(Other const& key) const {
        size_t idx;
        InfoType info;
        keyToIdx(key, &idx, &info);

        do {
            // unrolling this twice gives a bit of a speedup. More unrolling did not help.
            if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
                return idx;
            }
            next(&info, &idx);
            if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
                return idx;
            }
            next(&info, &idx);
        } while (info <= mInfo[idx]);

        // nothing found!
        return mMask == 0 ? 0 : mMask + 1;
    }

    // Copies all entries of o into *this; dispatches to the memcpy fast path when the flat map's
    // node is trivially copyable.
    void cloneData(const unordered_map& o) {
        Cloner<unordered_map, IsFlatMap && ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(Node)>()(o, *this);
    }

    // inserts a keyval that is guaranteed to be new, e.g. when the hashmap is resized.
    // @return index where the element was created
    size_t insert_move(Node&& keyval) {
        // we don't retry, fail if overflowing
        // don't need to check max num elements
        if (0 == mMaxNumElementsAllowed && !try_increase_info()) {
            throwOverflowError();
        }

        size_t idx;
        InfoType info;
        keyToIdx(keyval.getFirst(), &idx, &info);

        // skip forward. Use <= because we are certain that the element is not there.
        while (info <= mInfo[idx]) {
            idx = (idx + 1) & mMask;
            info += mInfoInc;
        }

        // key not found, so we are now exactly where we want to insert it.
        auto const insertion_idx = idx;
        auto const insertion_info = static_cast<uint8_t>(info);
        if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
            // info byte is about to overflow: trigger a rehash on the next insert
            mMaxNumElementsAllowed = 0;
        }

        // find an empty spot
        while (0 != mInfo[idx]) {
            next(&info, &idx);
        }

        auto& l = mKeyVals[insertion_idx];
        if (idx == insertion_idx) {
            // the target bucket itself was free: construct the node in place
            ::new (static_cast<void*>(&l)) Node(std::move(keyval));
        } else {
            // make room by shifting the occupied run up towards the free bucket at idx
            shiftUp(idx, insertion_idx);
            l = std::move(keyval);
        }

        // put at empty spot
        mInfo[insertion_idx] = insertion_info;

        ++mNumElements;
        return insertion_idx;
    }
1315 
public:
    using iterator = Iter<false>;
    using const_iterator = Iter<true>;

    // Creates an empty hash map. Nothing is allocated yet, this happens at the first insert. This
    // tremendously speeds up ctor & dtor of a map that never receives an element. The penalty is
    // paid at the first insert, and not before. Lookup of this empty map works because everybody
    // points to DummyInfoByte::b. parameter bucket_count is dictated by the standard, but we can
    // ignore it.
    explicit unordered_map(size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0,
                           const Hash& h = Hash{},
                           const KeyEqual& equal = KeyEqual{}) noexcept(noexcept(Hash(h)) &&
                                                                       noexcept(KeyEqual(equal)))
        : WHash(h)
        , WKeyEqual(equal) {
        ROBIN_HOOD_TRACE(this);
    }

    // Range constructor: inserts every element of [first, last).
    template <typename Iter>
    unordered_map(Iter first, Iter last, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0,
                  const Hash& h = Hash{}, const KeyEqual& equal = KeyEqual{})
        : WHash(h)
        , WKeyEqual(equal) {
        ROBIN_HOOD_TRACE(this);
        insert(first, last);
    }

    // Initializer-list constructor: inserts all listed key/value pairs.
    unordered_map(std::initializer_list<value_type> initlist,
                  size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash& h = Hash{},
                  const KeyEqual& equal = KeyEqual{})
        : WHash(h)
        , WKeyEqual(equal) {
        ROBIN_HOOD_TRACE(this);
        insert(initlist.begin(), initlist.end());
    }
1351 
    // Move constructor: steals the other map's storage when it has any; o is reset to the
    // freshly-initialized (empty, unallocated) state afterwards.
    unordered_map(unordered_map&& o) noexcept
        : WHash(std::move(static_cast<WHash&>(o)))
        , WKeyEqual(std::move(static_cast<WKeyEqual&>(o)))
        , DataPool(std::move(static_cast<DataPool&>(o))) {
        ROBIN_HOOD_TRACE(this);
        if (o.mMask) {
            mKeyVals = std::move(o.mKeyVals);
            mInfo = std::move(o.mInfo);
            mNumElements = std::move(o.mNumElements);
            mMask = std::move(o.mMask);
            mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
            mInfoInc = std::move(o.mInfoInc);
            mInfoHashShift = std::move(o.mInfoHashShift);
            // set other's mask to 0 so its destructor won't do anything
            o.init();
        }
    }

    // Move assignment: destroys our own contents, then steals o's storage (or just clears us
    // when o is empty).
    unordered_map& operator=(unordered_map&& o) noexcept {
        ROBIN_HOOD_TRACE(this);
        if (&o != this) {
            if (o.mMask) {
                // only move stuff if the other map actually has some data
                destroy();
                mKeyVals = std::move(o.mKeyVals);
                mInfo = std::move(o.mInfo);
                mNumElements = std::move(o.mNumElements);
                mMask = std::move(o.mMask);
                mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
                mInfoInc = std::move(o.mInfoInc);
                mInfoHashShift = std::move(o.mInfoHashShift);
                WHash::operator=(std::move(static_cast<WHash&>(o)));
                WKeyEqual::operator=(std::move(static_cast<WKeyEqual&>(o)));
                DataPool::operator=(std::move(static_cast<DataPool&>(o)));

                o.init();

            } else {
                // nothing in the other map => just clear us.
                clear();
            }
        }
        return *this;
    }

    // Copy constructor: allocates one block of the same size as o's and clones everything.
    unordered_map(const unordered_map& o)
        : WHash(static_cast<const WHash&>(o))
        , WKeyEqual(static_cast<const WKeyEqual&>(o))
        , DataPool(static_cast<const DataPool&>(o)) {
        ROBIN_HOOD_TRACE(this);
        if (!o.empty()) {
            // not empty: create an exact copy. it is also possible to just iterate through all
            // elements and insert them, but copying is probably faster.

            mKeyVals = static_cast<Node*>(
                detail::assertNotNull<std::bad_alloc>(malloc(calcNumBytesTotal(o.mMask + 1))));
            // no need for calloc because cloneData does memcpy
            mInfo = reinterpret_cast<uint8_t*>(mKeyVals + o.mMask + 1);
            mNumElements = o.mNumElements;
            mMask = o.mMask;
            mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
            mInfoInc = o.mInfoInc;
            mInfoHashShift = o.mInfoHashShift;
            cloneData(o);
        }
    }
1418 
    // Creates a copy of the given map. Copy constructor of each mEntry is used.
    unordered_map& operator=(unordered_map const& o) {
        ROBIN_HOOD_TRACE(this);
        if (&o == this) {
            // prevent assigning of itself
            return *this;
        }

        // we keep using the old allocator and not assign the new one, because we want to keep the
        // memory available when it is the same size.
        if (o.empty()) {
            if (0 == mMask) {
                // nothing to do, we are empty too
                return *this;
            }

            // not empty: destroy what we have there
            // clear also resets mInfo to 0, that's sometimes not necessary.
            destroy();
            init();
            WHash::operator=(static_cast<const WHash&>(o));
            WKeyEqual::operator=(static_cast<const WKeyEqual&>(o));
            DataPool::operator=(static_cast<DataPool const&>(o));

            return *this;
        }

        // clean up old stuff
        Destroyer<Self, IsFlatMap && std::is_trivially_destructible<Node>::value>{}.nodes(*this);

        if (mMask != o.mMask) {
            // no luck: we don't have the same array size allocated, so we need to realloc.
            if (0 != mMask) {
                // only deallocate if we actually have data!
                free(mKeyVals);
            }

            mKeyVals = static_cast<Node*>(
                detail::assertNotNull<std::bad_alloc>(malloc(calcNumBytesTotal(o.mMask + 1))));

            // no need for calloc here because cloneData performs a memcpy.
            mInfo = reinterpret_cast<uint8_t*>(mKeyVals + o.mMask + 1);
            // sentinel is set in cloneData
        }
        WHash::operator=(static_cast<const WHash&>(o));
        WKeyEqual::operator=(static_cast<const WKeyEqual&>(o));
        DataPool::operator=(static_cast<DataPool const&>(o));
        mNumElements = o.mNumElements;
        mMask = o.mMask;
        mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
        mInfoInc = o.mInfoInc;
        mInfoHashShift = o.mInfoHashShift;
        cloneData(o);

        return *this;
    }
1475 
1476  // Swaps everything between the two maps.
1477  void swap(unordered_map& o) {
1478  ROBIN_HOOD_TRACE(this);
1479  using std::swap;
1480  swap(o, *this);
1481  }
1482 
    // Clears all data, without resizing.
    void clear() {
        ROBIN_HOOD_TRACE(this);
        if (empty()) {
            // don't do anything! also important because we don't want to write to DummyInfoByte::b,
            // even though we would just write 0 to it.
            return;
        }

        Destroyer<Self, IsFlatMap && std::is_trivially_destructible<Node>::value>{}.nodes(*this);

        // clear everything except the sentinel
        // std::memset(mInfo, 0, sizeof(uint8_t) * (mMask + 1));
        uint8_t const z = 0;
        std::fill(mInfo, mInfo + (sizeof(uint8_t) * (mMask + 1)), z);

        // reset the probe bookkeeping to its initial configuration
        mInfoInc = InitialInfoInc;
        mInfoHashShift = InitialInfoHashShift;
    }

    // Destroys the map and all its contents.
    ~unordered_map() {
        ROBIN_HOOD_TRACE(this);
        destroy();
    }
1508 
1509  // Checks if both maps contain the same entries. Order is irrelevant.
1510  bool operator==(const unordered_map& other) const {
1511  ROBIN_HOOD_TRACE(this);
1512  if (other.size() != size()) {
1513  return false;
1514  }
1515  for (auto const& otherEntry : other) {
1516  auto const myIt = find(otherEntry.first);
1517  if (myIt == end() || !(myIt->second == otherEntry.second)) {
1518  return false;
1519  }
1520  }
1521 
1522  return true;
1523  }
1524 
1525  bool operator!=(const unordered_map& other) const {
1526  ROBIN_HOOD_TRACE(this);
1527  return !operator==(other);
1528  }
1529 
    // Returns a reference to the mapped value for key, creating the entry first if the key is
    // not present yet (handled by doCreateByKey).
    mapped_type& operator[](const key_type& key) {
        ROBIN_HOOD_TRACE(this);
        return doCreateByKey(key);
    }

    // Rvalue overload: the key is moved into the new entry when one has to be created.
    mapped_type& operator[](key_type&& key) {
        ROBIN_HOOD_TRACE(this);
        return doCreateByKey(std::move(key));
    }
1539 
1540  template <typename Iter>
1541  void insert(Iter first, Iter last) {
1542  for (; first != last; ++first) {
1543  // value_type ctor needed because this might be called with std::pair's
1544  insert(value_type(*first));
1545  }
1546  }
1547 
    // Constructs a node in place from args and tries to insert it. If the key already exists
    // the freshly built node is destroyed again and no insertion happens.
    // @return pair of iterator-to-element and whether an insertion took place.
    template <typename... Args>
    std::pair<iterator, bool> emplace(Args&&... args) {
        ROBIN_HOOD_TRACE(this);
        Node n{*this, std::forward<Args>(args)...};
        auto r = doInsert(std::move(n));
        if (!r.second) {
            // insertion not possible: destroy node
            // NOLINTNEXTLINE(bugprone-use-after-move)
            n.destroy(*this);
        }
        return r;
    }

    // Inserts a copy of keyval (no-op if the key already exists).
    std::pair<iterator, bool> insert(const value_type& keyval) {
        ROBIN_HOOD_TRACE(this);
        return doInsert(keyval);
    }

    // Move-inserts keyval (no-op if the key already exists).
    std::pair<iterator, bool> insert(value_type&& keyval) {
        return doInsert(std::move(keyval));
    }
1569 
1570  // Returns 1 if key is found, 0 otherwise.
1571  size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard)
1572  ROBIN_HOOD_TRACE(this);
1573  auto kv = mKeyVals + findIdx(key);
1574  if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
1575  return 1;
1576  }
1577  return 0;
1578  }
1579 
    // Returns a reference to the value found for key.
    // Throws std::out_of_range if element cannot be found
    mapped_type& at(key_type const& key) {
        ROBIN_HOOD_TRACE(this);
        auto kv = mKeyVals + findIdx(key);
        if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
            // findIdx returned the sentinel position: the key is absent
            doThrow<std::out_of_range>("key not found");
        }
        return kv->getSecond();
    }

    // Returns a reference to the value found for key.
    // Throws std::out_of_range if element cannot be found
    mapped_type const& at(key_type const& key) const { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        auto kv = mKeyVals + findIdx(key);
        if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
            doThrow<std::out_of_range>("key not found");
        }
        return kv->getSecond();
    }

    // Returns an iterator to the element with the given key, or end() if not present.
    const_iterator find(const key_type& key) const { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        const size_t idx = findIdx(key);
        return const_iterator{mKeyVals + idx, mInfo + idx};
    }

    // Heterogeneous lookup overload: avoids constructing a key_type temporary.
    template <typename OtherKey>
    const_iterator find(const OtherKey& key, is_transparent_tag /*unused*/) const {
        ROBIN_HOOD_TRACE(this);
        const size_t idx = findIdx(key);
        return const_iterator{mKeyVals + idx, mInfo + idx};
    }

    iterator find(const key_type& key) {
        ROBIN_HOOD_TRACE(this);
        const size_t idx = findIdx(key);
        return iterator{mKeyVals + idx, mInfo + idx};
    }

    template <typename OtherKey>
    iterator find(const OtherKey& key, is_transparent_tag /*unused*/) {
        ROBIN_HOOD_TRACE(this);
        const size_t idx = findIdx(key);
        return iterator{mKeyVals + idx, mInfo + idx};
    }
1627 
    // Iterator to the first occupied bucket, or end() for an empty map.
    iterator begin() {
        ROBIN_HOOD_TRACE(this);
        if (empty()) {
            return end();
        }
        return iterator(mKeyVals, mInfo, fast_forward_tag{});
    }
    const_iterator begin() const { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        return cbegin();
    }
    const_iterator cbegin() const { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        if (empty()) {
            return cend();
        }
        return const_iterator(mKeyVals, mInfo, fast_forward_tag{});
    }

    iterator end() {
        ROBIN_HOOD_TRACE(this);
        // no need to supply valid info pointer: end() must not be dereferenced, and only node
        // pointer is compared.
        return iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr};
    }
    const_iterator end() const { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        return cend();
    }
    const_iterator cend() const { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        return const_iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr};
    }
1661 
    // const_iterator overload simply forwards to the iterator overload.
    iterator erase(const_iterator pos) {
        ROBIN_HOOD_TRACE(this);
        // its safe to perform const cast here
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
        return erase(iterator{const_cast<Node*>(pos.mKeyVals), const_cast<uint8_t*>(pos.mInfo)});
    }

    // Erases element at pos, returns iterator to the next element.
    iterator erase(iterator pos) {
        ROBIN_HOOD_TRACE(this);
        // we assume that pos always points to a valid mEntry, and not end().
        auto const idx = static_cast<size_t>(pos.mKeyVals - mKeyVals);

        shiftDown(idx);
        --mNumElements;

        if (*pos.mInfo) {
            // we've backward shifted, return this again
            return pos;
        }

        // no backward shift, return next element
        return ++pos;
    }

    // Erases by key. Returns the number of removed elements (0 or 1).
    size_t erase(const key_type& key) {
        ROBIN_HOOD_TRACE(this);
        size_t idx;
        InfoType info;
        keyToIdx(key, &idx, &info);

        // check while info matches with the source idx
        do {
            if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
                shiftDown(idx);
                --mNumElements;
                return 1;
            }
            next(&info, &idx);
        } while (info <= mInfo[idx]);

        // nothing found to delete
        return 0;
    }
1706 
    // reserves space for the specified number of elements. Makes sure the old data fits.
    // exactly the same as reserve(c).
    void rehash(size_t c) {
        reserve(c);
    }

    // reserves space for the specified number of elements. Makes sure the old data fits.
    // Exactly the same as rehash(c). Use rehash(0)/reserve(0) to shrink to fit.
    void reserve(size_t c) {
        ROBIN_HOOD_TRACE(this);
        // grow to the smallest power-of-two bucket count whose allowed load covers both c and
        // the elements already stored
        auto const minElementsAllowed = (std::max)(c, mNumElements);
        auto newSize = InitialNumElements;
        while (calcMaxNumElementsAllowed(newSize) < minElementsAllowed && newSize != 0) {
            newSize *= 2;
        }
        if (ROBIN_HOOD_UNLIKELY(newSize == 0)) {
            // doubling wrapped around to zero: the requested size is unrepresentable
            throwOverflowError();
        }

        rehashPowerOfTwo(newSize);
    }
1728 
    // Number of elements currently stored.
    size_type size() const noexcept { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        return mNumElements;
    }

    // Theoretical maximum; the practical limit is reached much earlier (load factor, memory).
    size_type max_size() const noexcept { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        return static_cast<size_type>(-1);
    }

    ROBIN_HOOD(NODISCARD) bool empty() const noexcept {
        ROBIN_HOOD_TRACE(this);
        return 0 == mNumElements;
    }

    // Maximum load factor, fixed at compile time by the MaxLoadFactor100 template parameter.
    float max_load_factor() const noexcept { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        return MaxLoadFactor100 / 100.0F;
    }

    // Average number of elements per bucket. Since we allow only 1 per bucket
    float load_factor() const noexcept { // NOLINT(modernize-use-nodiscard)
        ROBIN_HOOD_TRACE(this);
        return static_cast<float>(size()) / static_cast<float>(mMask + 1);
    }

    // Current index mask (bucket count minus one; 0 for the never-allocated empty map).
    ROBIN_HOOD(NODISCARD) size_t mask() const noexcept {
        ROBIN_HOOD_TRACE(this);
        return mMask;
    }
1759 
    // Maximum number of elements that fit into maxElements buckets while respecting the
    // configured load factor.
    ROBIN_HOOD(NODISCARD) size_t calcMaxNumElementsAllowed(size_t maxElements) const noexcept {
        if (ROBIN_HOOD_LIKELY(maxElements <= (std::numeric_limits<size_t>::max)() / 100)) {
            return maxElements * MaxLoadFactor100 / 100;
        }

        // we might be a bit imprecise, but since maxElements is quite large that doesn't matter
        return (maxElements / 100) * MaxLoadFactor100;
    }

    // Bytes needed for the info array: one byte per bucket plus sizeof(uint64_t) extra bytes
    // (sentinel + padding so fastForward() can always load a whole word).
    ROBIN_HOOD(NODISCARD) size_t calcNumBytesInfo(size_t numElements) const {
        return numElements + sizeof(uint64_t);
    }

    // Total allocation size (node array + info array). Calculation only allowed for 2^n values.
    ROBIN_HOOD(NODISCARD) size_t calcNumBytesTotal(size_t numElements) const {
#if ROBIN_HOOD(BITNESS) == 64
        return numElements * sizeof(Node) + calcNumBytesInfo(numElements);
#else
        // make sure we're doing 64bit operations, so we are at least safe against 32bit overflows.
        auto const ne = static_cast<uint64_t>(numElements);
        auto const s = static_cast<uint64_t>(sizeof(Node));
        auto const infos = static_cast<uint64_t>(calcNumBytesInfo(numElements));

        auto const total64 = ne * s + infos;
        auto const total = static_cast<size_t>(total64);

        if (ROBIN_HOOD_UNLIKELY(static_cast<uint64_t>(total) != total64)) {
            // the 64-bit byte count does not fit into size_t on this 32-bit target
            throwOverflowError();
        }
        return total;
#endif
    }
1792 
1793 private:
1794  // reserves space for at least the specified number of elements.
1795  // only works if numBuckets if power of two
1796  void rehashPowerOfTwo(size_t numBuckets) {
1797  ROBIN_HOOD_TRACE(this);
1798 
1799  Node* const oldKeyVals = mKeyVals;
1800  uint8_t const* const oldInfo = mInfo;
1801 
1802  const size_t oldMaxElements = mMask + 1;
1803 
1804  // resize operation: move stuff
1805  init_data(numBuckets);
1806  if (oldMaxElements > 1) {
1807  for (size_t i = 0; i < oldMaxElements; ++i) {
1808  if (oldInfo[i] != 0) {
1809  insert_move(std::move(oldKeyVals[i]));
1810  // destroy the node but DON'T destroy the data.
1811  oldKeyVals[i].~Node();
1812  }
1813  }
1814 
1815  // don't destroy old data: put it into the pool instead
1816  DataPool::addOrFree(oldKeyVals, calcNumBytesTotal(oldMaxElements));
1817  }
1818  }
1819 
    // Reports that a requested size can't be represented/allocated: throws
    // std::overflow_error when exceptions are available, otherwise aborts.
    // Marked NOINLINE so this cold error path is not inlined into callers.
    ROBIN_HOOD(NOINLINE) void throwOverflowError() const {
#if ROBIN_HOOD(HAS_EXCEPTIONS)
        throw std::overflow_error("robin_hood::map overflow");
#else
        abort();
#endif
    }
1827 
    // Allocates zero-initialized storage for max_elements buckets (caller must
    // pass a power of two) plus the trailing info bytes, and resets all
    // bookkeeping to "empty".
    void init_data(size_t max_elements) {
        mNumElements = 0;
        mMask = max_elements - 1;  // power-of-two count -> all-ones index mask
        mMaxNumElementsAllowed = calcMaxNumElementsAllowed(max_elements);

        // calloc also zeroes everything: every info byte starts at 0 (empty).
        mKeyVals = reinterpret_cast<Node*>(
            detail::assertNotNull<std::bad_alloc>(calloc(1, calcNumBytesTotal(max_elements))));
        // info bytes live directly after the Node array, in the same allocation
        mInfo = reinterpret_cast<uint8_t*>(mKeyVals + max_elements);

        // set sentinel: a non-zero byte one past the last bucket, so scans that
        // look for a non-zero info byte terminate at the end of the array.
        mInfo[max_elements] = 1;

        mInfoInc = InitialInfoInc;
        mInfoHashShift = InitialInfoHashShift;
    }
1844 
    // operator[] workhorse: returns a reference to the mapped value for `key`,
    // default-constructing it if the key is not present. The outer while(true)
    // restarts the whole lookup after increase_size(), since growing the table
    // invalidates idx/info.
    template <typename Arg>
    mapped_type& doCreateByKey(Arg&& key) {
        while (true) {
            size_t idx;
            InfoType info;
            keyToIdx(key, &idx, &info);
            nextWhileLess(&info, &idx);  // advance to where this key's probe run starts

            // while we potentially have a match. Can't do a do-while here because when mInfo is 0
            // we don't want to skip forward
            while (info == mInfo[idx]) {
                if (WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
                    // key already exists, do not insert.
                    return mKeyVals[idx].getSecond();
                }
                next(&info, &idx);
            }

            // unlikely that this evaluates to true
            if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) {
                increase_size();
                continue;  // table layout changed; redo the search from scratch
            }

            // key not found, so we are now exactly where we want to insert it.
            auto const insertion_idx = idx;
            auto const insertion_info = info;
            if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
                // info byte would overflow further along the probe chain:
                // setting the allowed count to 0 forces a resize on the next
                // insert attempt.
                mMaxNumElementsAllowed = 0;
            }

            // find an empty spot
            while (0 != mInfo[idx]) {
                next(&info, &idx);
            }

            auto& l = mKeyVals[insertion_idx];
            if (idx == insertion_idx) {
                // put at empty spot. This forwards all arguments into the node where the object is
                // constructed exactly where it is needed.
                ::new (static_cast<void*>(&l))
                    Node(*this, std::piecewise_construct,
                         std::forward_as_tuple(std::forward<Arg>(key)), std::forward_as_tuple());
            } else {
                // bucket is occupied: shift the entries between insertion_idx
                // and the empty slot up by one (shiftUp), then assign into the
                // freed bucket.
                shiftUp(idx, insertion_idx);
                l = Node(*this, std::piecewise_construct,
                         std::forward_as_tuple(std::forward<Arg>(key)), std::forward_as_tuple());
            }

            // mKeyVals[idx].getFirst() = std::move(key);
            mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info);

            ++mNumElements;
            return mKeyVals[insertion_idx].getSecond();
        }
    }
1901 
    // This is exactly the same code as operator[] (doCreateByKey), except for
    // the return values: returns {iterator to element, inserted?} like
    // std::unordered_map::insert.
    template <typename Arg>
    std::pair<iterator, bool> doInsert(Arg&& keyval) {
        while (true) {
            size_t idx;
            InfoType info;
            keyToIdx(keyval.getFirst(), &idx, &info);
            nextWhileLess(&info, &idx);

            // while we potentially have a match
            while (info == mInfo[idx]) {
                if (WKeyEqual::operator()(keyval.getFirst(), mKeyVals[idx].getFirst())) {
                    // key already exists, do NOT insert.
                    // see http://en.cppreference.com/w/cpp/container/unordered_map/insert
                    return std::make_pair<iterator, bool>(iterator(mKeyVals + idx, mInfo + idx),
                                                          false);
                }
                next(&info, &idx);
            }

            // unlikely that this evaluates to true
            if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) {
                increase_size();
                continue;  // table layout changed; restart the search
            }

            // key not found, so we are now exactly where we want to insert it.
            auto const insertion_idx = idx;
            auto const insertion_info = info;
            if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
                // info byte would overflow down the probe chain; force a
                // resize on the next insert attempt.
                mMaxNumElementsAllowed = 0;
            }

            // find an empty spot
            while (0 != mInfo[idx]) {
                next(&info, &idx);
            }

            auto& l = mKeyVals[insertion_idx];
            if (idx == insertion_idx) {
                // target bucket is the empty one: construct the node in place.
                ::new (static_cast<void*>(&l)) Node(*this, std::forward<Arg>(keyval));
            } else {
                // shift displaced entries up by one, then assign into the
                // freed bucket.
                shiftUp(idx, insertion_idx);
                l = Node(*this, std::forward<Arg>(keyval));
            }

            // put at empty spot
            mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info);

            ++mNumElements;
            return std::make_pair(iterator(mKeyVals + insertion_idx, mInfo + insertion_idx), true);
        }
    }
1955 
    // Tries to gain capacity WITHOUT reallocating: halves mInfoInc and drops
    // one hash bit from every stored info byte, giving the distance part of the
    // info byte one more bit of range. Returns false when mInfoInc can't be
    // halved any further.
    bool try_increase_info() {
        ROBIN_HOOD_LOG("mInfoInc=" << mInfoInc << ", numElements=" << mNumElements
                                   << ", maxNumElementsAllowed="
                                   << calcMaxNumElementsAllowed(mMask + 1));
        if (mInfoInc <= 2) {
            // need to be > 2 so that shift works (otherwise undefined behavior!)
            return false;
        }
        // we got space left, try to make info smaller
        mInfoInc = static_cast<uint8_t>(mInfoInc >> 1U);

        // remove one bit of the hash, leaving more space for the distance info.
        // This is extremely fast because we can operate on 8 bytes at once.
        ++mInfoHashShift;
        auto const data = reinterpret_cast_no_cast_align_warning<uint64_t*>(mInfo);
        auto const numEntries = (mMask + 1) / 8;

        for (size_t i = 0; i < numEntries; ++i) {
            // shift all 8 packed info bytes right by one; the mask clears the
            // bit that would otherwise leak in from the neighboring byte.
            data[i] = (data[i] >> 1U) & UINT64_C(0x7f7f7f7f7f7f7f7f);
        }
        mMaxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
        return true;
    }
1979 
    // Grows the table: first allocation for an empty map, otherwise the cheap
    // in-place route (try_increase_info), otherwise doubling the bucket count.
    // Calls throwOverflowError() if growth is triggered while the table holds
    // fewer than half its allowed elements — a sign of a pathologically bad
    // hash function rather than genuine fullness.
    void increase_size() {
        // nothing allocated yet? just allocate InitialNumElements
        if (0 == mMask) {
            init_data(InitialNumElements);
            return;
        }

        auto const maxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
        if (mNumElements < maxNumElementsAllowed && try_increase_info()) {
            return;
        }

        ROBIN_HOOD_LOG("mNumElements=" << mNumElements << ", maxNumElementsAllowed="
                                       << maxNumElementsAllowed << ", load="
                                       << (static_cast<double>(mNumElements) * 100.0 /
                                           (static_cast<double>(mMask) + 1)));
        // it seems we have a really bad hash function! don't try to resize again
        if (mNumElements * 2 < calcMaxNumElementsAllowed(mMask + 1)) {
            throwOverflowError();
        }

        rehashPowerOfTwo((mMask + 1) * 2);
    }
2003 
    // Destroys all nodes and releases the bucket allocation.
    void destroy() {
        if (0 == mMask) {
            // don't deallocate! mKeyVals/mInfo still alias mMask — nothing was
            // ever allocated (see init()).
            return;
        }

        // nodesDoNotDeallocate destroys the nodes without freeing their
        // storage; the boolean presumably selects a no-op specialization when
        // the flat map's nodes are trivially destructible — TODO confirm
        // against the Destroyer definition above.
        Destroyer<Self, IsFlatMap && std::is_trivially_destructible<Node>::value>{}
            .nodesDoNotDeallocate(*this);
        // matches the calloc() in init_data()
        free(mKeyVals);
    }
2014 
2015  void init() noexcept {
2016  mKeyVals = reinterpret_cast<Node*>(&mMask);
2017  mInfo = reinterpret_cast<uint8_t*>(&mMask);
2018  mNumElements = 0;
2019  mMask = 0;
2020  mMaxNumElementsAllowed = 0;
2021  mInfoInc = InitialInfoInc;
2022  mInfoHashShift = InitialInfoHashShift;
2023  }
2024 
    // members are sorted so no padding occurs
    // NOTE: until the first allocation, mKeyVals and mInfo alias mMask (which
    // holds 0), so mInfo[0] reads as an empty info byte; see init()/destroy().
    Node* mKeyVals = reinterpret_cast<Node*>(&mMask); // 8 byte 8
    uint8_t* mInfo = reinterpret_cast<uint8_t*>(&mMask); // 8 byte 16
    size_t mNumElements = 0; // 8 byte 24
    size_t mMask = 0; // 8 byte 32
    size_t mMaxNumElementsAllowed = 0; // 8 byte 40
    InfoType mInfoInc = InitialInfoInc; // 4 byte 44
    InfoType mInfoHashShift = InitialInfoHashShift; // 4 byte 48
    // 16 byte 56 if NodeAllocator
2033  // 16 byte 56 if NodeAllocator
2034 };
2035 
2036 } // namespace detail
2037 
// Flat variant: first template argument (IsFlatMap) is true, so entries are
// stored directly in the bucket array.
template <typename Key, typename T, typename Hash = hash<Key>,
          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
using unordered_flat_map = detail::unordered_map<true, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
2041 
// Node variant: first template argument (IsFlatMap) is false, selecting the
// node-based storage.
template <typename Key, typename T, typename Hash = hash<Key>,
          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
using unordered_node_map = detail::unordered_map<false, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
2045 
// Default map: automatically picks the flat layout when the pair is small
// (at most 6 machine words) and both nothrow move-constructible and nothrow
// move-assignable; otherwise falls back to the node-based layout.
template <typename Key, typename T, typename Hash = hash<Key>,
          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
using unordered_map =
    detail::unordered_map<sizeof(robin_hood::pair<Key, T>) <= sizeof(size_t) * 6 &&
                              std::is_nothrow_move_constructible<robin_hood::pair<Key, T>>::value &&
                              std::is_nothrow_move_assignable<robin_hood::pair<Key, T>>::value,
                          MaxLoadFactor100, Key, T, Hash, KeyEqual>;
2053 
2054 } // namespace robin_hood
2055 
2056 #endif