From: Simon Marchi
Date: Mon, 27 Oct 2025 19:25:51 +0000 (-0400)
Subject: gdbsupport: bump unordered_dense to 4.8.0
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=ddaee713f51c9fa12e12849ccdaf6739b2f43e2c;p=thirdparty%2Fbinutils-gdb.git

gdbsupport: bump unordered_dense to 4.8.0

We don't need anything in this release, but I think it doesn't hurt to
just stay up to date.

The new version has a new include file, stl.h.  To keep things clean
and separated, move the imported files to a new sub-directory.  This
requires a small change in gdb/check-include-guards.py, to be able to
ignore the whole new directory.

Change-Id: Ic8c5d0dd5ea8b6691c99975d6ca78f637175ef42
Approved-By: Tom Tromey
---

diff --git a/gdb/check-include-guards.py b/gdb/check-include-guards.py
index 366ae002858..b6efa1287b3 100755
--- a/gdb/check-include-guards.py
+++ b/gdb/check-include-guards.py
@@ -23,6 +23,7 @@
 # When --update is used, rewrite the files in place as needed.
 
+import fnmatch
 import re
 import sys
 from typing import List
@@ -32,7 +33,9 @@ OLDDEF = re.compile("^#if !defined *\\(([A-Za-z0-9_]+)\\)\n")
 
 # Some headers -- in particular, ones that aren't maintained by gdb --
 # should be excluded from the checks.
-EXCLUDED = frozenset(["gdbsupport/unordered_dense.h"])
+#
+# This is interpreted as a list of patterns as interpreted by fnmatch.
+EXCLUDED = ("gdbsupport/unordered_dense/*",)
 
 # See if
@@ -68,8 +71,9 @@ def write_header(filename: str, contents: List[str]):
 
 
 def check_header(filename: str):
-    if filename in EXCLUDED:
-        return
+    for pat in EXCLUDED:
+        if fnmatch.fnmatch(filename, pat):
+            return
 
     # Turn x/y-z.h into X_Y_Z_H.
     assert filename.endswith(".h")
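As an aside, the new exclusion check is easy to model in isolation.  The
sketch below is not part of the patch (the is_excluded helper is
hypothetical); it only mirrors the EXCLUDED tuple and the fnmatch loop
added to check_header() above, to show which files are now skipped:

    import fnmatch

    # Same pattern tuple as the patched check-include-guards.py.
    EXCLUDED = ("gdbsupport/unordered_dense/*",)

    def is_excluded(filename: str) -> bool:
        # Equivalent to the loop added to check_header(): skip the
        # header if it matches any pattern.
        return any(fnmatch.fnmatch(filename, pat) for pat in EXCLUDED)

    # Everything under the new sub-directory matches (fnmatch's "*"
    # also matches "/"); gdb-maintained headers do not.
    assert is_excluded("gdbsupport/unordered_dense/stl.h")
    assert is_excluded("gdbsupport/unordered_dense/unordered_dense.h")
    assert not is_excluded("gdbsupport/unordered_map.h")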
diff --git a/gdbsupport/unordered_dense/stl.h b/gdbsupport/unordered_dense/stl.h
new file mode 100644
index 00000000000..8c198773195
--- /dev/null
+++ b/gdbsupport/unordered_dense/stl.h
@@ -0,0 +1,83 @@
+///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
+
+// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
+// Version 4.8.0
+// https://github.com/martinus/unordered_dense
+//
+// Licensed under the MIT License <https://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2022-2024 Martin Leitner-Ankerl
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ANKERL_STL_H
+#define ANKERL_STL_H
+
+#include <array>            // for array
+#include <cstdint>          // for uint64_t, uint32_t, std::uint8_t, UINT64_C
+#include <cstring>          // for size_t, memcpy, memset
+#include <functional>       // for equal_to, hash
+#include <initializer_list> // for initializer_list
+#include <iterator>         // for pair, distance
+#include <limits>           // for numeric_limits
+#include <memory>           // for allocator, allocator_traits, shared_ptr
+#include <optional>         // for optional
+#include <stdexcept>        // for out_of_range
+#include <string>           // for basic_string
+#include <string_view>      // for basic_string_view, hash
+#include <tuple>            // for forward_as_tuple
+#include <type_traits>      // for enable_if_t, declval, conditional_t, ena...
+#include <utility>          // for forward, exchange, pair, as_const, piece...
+#include <vector>           // for vector
+
+// <memory_resource> includes <shared_mutex>, which fails to compile if
+// targeting GCC >= 13 with the (rewritten) win32 thread model, and
+// targeting Windows earlier than Vista (0x600).  GCC predefines
+// _REENTRANT when using the 'posix' model, and doesn't when using the
+// 'win32' model.
+#if defined __MINGW64__ && defined __GNUC__ && __GNUC__ >= 13 && !defined _REENTRANT
+// _WIN32_WINNT is guaranteed to be defined here because of the
+// inclusion above.
+# ifndef _WIN32_WINNT
+#  error "_WIN32_WINNT not defined"
+# endif
+# if _WIN32_WINNT < 0x600
+#  define ANKERL_MEMORY_RESOURCE_IS_BAD() 1 // NOLINT(cppcoreguidelines-macro-usage)
+# endif
+#endif
+#ifndef ANKERL_MEMORY_RESOURCE_IS_BAD
+# define ANKERL_MEMORY_RESOURCE_IS_BAD() 0 // NOLINT(cppcoreguidelines-macro-usage)
+#endif
+
+#if defined(__has_include) && !defined(ANKERL_UNORDERED_DENSE_DISABLE_PMR)
+# if __has_include(<memory_resource>) && !ANKERL_MEMORY_RESOURCE_IS_BAD()
+#  define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage)
+#  include <memory_resource> // for polymorphic_allocator
+# elif __has_include(<experimental/memory_resource>)
+#  define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage)
+#  include <experimental/memory_resource> // for polymorphic_allocator
+# endif
+#endif
+
+#if defined(_MSC_VER) && defined(_M_X64)
+# include <intrin.h>
+# pragma intrinsic(_umul128)
+#endif
+
+#endif
diff --git a/gdbsupport/unordered_dense.h b/gdbsupport/unordered_dense/unordered_dense.h
similarity index 85%
rename from gdbsupport/unordered_dense.h
rename to gdbsupport/unordered_dense/unordered_dense.h
index 64766f254fc..56383b6ce8f 100644
--- a/gdbsupport/unordered_dense.h
+++ b/gdbsupport/unordered_dense/unordered_dense.h
@@ -1,7 +1,7 @@
 ///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
 
 // A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
-// Version 4.6.0
+// Version 4.8.0
 // https://github.com/martinus/unordered_dense
 //
 // Licensed under the MIT License <https://opensource.org/licenses/MIT>.
@@ -31,7 +31,7 @@
 // see https://semver.org/spec/v2.0.0.html
 #define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 4 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes
-#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 6 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
+#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 8 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
 #define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes
 
 // API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/
@@ -81,66 +81,17 @@
 # define ANKERL_UNORDERED_DENSE_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK
 #endif
 
-// defined in unordered_dense.cpp
-#if !defined(ANKERL_UNORDERED_DENSE_EXPORT)
-# define ANKERL_UNORDERED_DENSE_EXPORT
-#endif
-
 #if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
 # error ankerl::unordered_dense requires C++17 or higher
 #else
-# include <array>            // for array
-# include <cstdint>          // for uint64_t, uint32_t, uint8_t, UINT64_C
-# include <cstring>          // for size_t, memcpy, memset
-# include <functional>       // for equal_to, hash
-# include <initializer_list> // for initializer_list
-# include <iterator>         // for pair, distance
-# include <limits>           // for numeric_limits
-# include <memory>           // for allocator, allocator_traits, shared_ptr
-# include <optional>         // for optional
-# include <stdexcept>        // for out_of_range
-# include <string>           // for basic_string
-# include <string_view>      // for basic_string_view, hash
-# include <tuple>            // for forward_as_tuple
-# include <type_traits>      // for enable_if_t, declval, conditional_t, ena...
-# include <utility>          // for forward, exchange, pair, as_const, piece...
-# include <vector>           // for vector
-# if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0
-#  include <cstdlib> // for abort
-# endif
-
-// <memory_resource> includes <shared_mutex>, which fails to compile if
-// targeting GCC >= 13 with the (rewritten) win32 thread model, and
-// targeting Windows earlier than Vista (0x600).  GCC predefines
-// _REENTRANT when using the 'posix' model, and doesn't when using the
-// 'win32' model.
-# if defined __MINGW64__ && defined __GNUC__ && __GNUC__ >= 13 && !defined _REENTRANT
-// _WIN32_WINNT is guaranteed to be defined here because of the
-// inclusion above.
-#  ifndef _WIN32_WINNT
-#   error "_WIN32_WINNT not defined"
-#  endif
-#  if _WIN32_WINNT < 0x600
-#   define ANKERL_MEMORY_RESOURCE_IS_BAD() 1 // NOLINT(cppcoreguidelines-macro-usage)
-#  endif
-# endif
-# ifndef ANKERL_MEMORY_RESOURCE_IS_BAD
-#  define ANKERL_MEMORY_RESOURCE_IS_BAD() 0 // NOLINT(cppcoreguidelines-macro-usage)
-# endif
-# if defined(__has_include) && !defined(ANKERL_UNORDERED_DENSE_DISABLE_PMR)
-#  if __has_include(<memory_resource>) && !ANKERL_MEMORY_RESOURCE_IS_BAD()
-#   define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage)
-#   include <memory_resource> // for polymorphic_allocator
-#  elif __has_include(<experimental/memory_resource>)
-#   define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage)
-#   include <experimental/memory_resource> // for polymorphic_allocator
-#  endif
+# if !defined(ANKERL_UNORDERED_DENSE_STD_MODULE)
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+#  define ANKERL_UNORDERED_DENSE_STD_MODULE 0
 # endif
 
-# if defined(_MSC_VER) && defined(_M_X64)
-#  include <intrin.h>
-#  pragma intrinsic(_umul128)
+# if !ANKERL_UNORDERED_DENSE_STD_MODULE
+#  include "stl.h"
 # endif
 
 # if __has_cpp_attribute(likely) && __has_cpp_attribute(unlikely) && ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L
@@ -204,29 +155,29 @@ namespace detail {
 // hardcodes seed and the secret, reformats the code, and clang-tidy fixes.
 namespace detail::wyhash {
 
-inline void mum(uint64_t* a, uint64_t* b) {
+inline void mum(std::uint64_t* a, std::uint64_t* b) {
 # if defined(__SIZEOF_INT128__)
     __uint128_t r = *a;
     r *= *b;
-    *a = static_cast<uint64_t>(r);
-    *b = static_cast<uint64_t>(r >> 64U);
+    *a = static_cast<std::uint64_t>(r);
+    *b = static_cast<std::uint64_t>(r >> 64U);
 # elif defined(_MSC_VER) && defined(_M_X64)
     *a = _umul128(*a, *b, b);
 # else
-    uint64_t ha = *a >> 32U;
-    uint64_t hb = *b >> 32U;
-    uint64_t la = static_cast<uint32_t>(*a);
-    uint64_t lb = static_cast<uint32_t>(*b);
-    uint64_t hi{};
-    uint64_t lo{};
-    uint64_t rh = ha * hb;
-    uint64_t rm0 = ha * lb;
-    uint64_t rm1 = hb * la;
-    uint64_t rl = la * lb;
-    uint64_t t = rl + (rm0 << 32U);
-    auto c = static_cast<uint64_t>(t < rl);
+    std::uint64_t ha = *a >> 32U;
+    std::uint64_t hb = *b >> 32U;
+    std::uint64_t la = static_cast<std::uint32_t>(*a);
+    std::uint64_t lb = static_cast<std::uint32_t>(*b);
+    std::uint64_t hi{};
+    std::uint64_t lo{};
+    std::uint64_t rh = ha * hb;
+    std::uint64_t rm0 = ha * lb;
+    std::uint64_t rm1 = hb * la;
+    std::uint64_t rl = la * lb;
+    std::uint64_t t = rl + (rm0 << 32U);
+    auto c = static_cast<std::uint64_t>(t < rl);
     lo = t + (rm1 << 32U);
-    c += static_cast<uint64_t>(lo < t);
+    c += static_cast<std::uint64_t>(lo < t);
     hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c;
     *a = lo;
     *b = hi;
 # endif
@@ -234,39 +185,39 @@ inline void mum(uint64_t* a, uint64_t* b) {
 }
 
 // multiply and xor mix function, aka MUM
-[[nodiscard]] inline auto mix(uint64_t a, uint64_t b) -> uint64_t {
+[[nodiscard]] inline auto mix(std::uint64_t a, std::uint64_t b) -> std::uint64_t {
     mum(&a, &b);
     return a ^ b;
 }
 
 // read functions. WARNING: we don't care about endianness, so results are different on big endian!
-[[nodiscard]] inline auto r8(const uint8_t* p) -> uint64_t {
-    uint64_t v{};
+[[nodiscard]] inline auto r8(const std::uint8_t* p) -> std::uint64_t {
+    std::uint64_t v{};
     std::memcpy(&v, p, 8U);
     return v;
 }
 
-[[nodiscard]] inline auto r4(const uint8_t* p) -> uint64_t {
-    uint32_t v{};
+[[nodiscard]] inline auto r4(const std::uint8_t* p) -> std::uint64_t {
+    std::uint32_t v{};
     std::memcpy(&v, p, 4);
     return v;
 }
 
 // reads 1, 2, or 3 bytes
-[[nodiscard]] inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
-    return (static_cast<uint64_t>(p[0]) << 16U) | (static_cast<uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
+[[nodiscard]] inline auto r3(const std::uint8_t* p, std::size_t k) -> std::uint64_t {
+    return (static_cast<std::uint64_t>(p[0]) << 16U) | (static_cast<std::uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
 }
 
-[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, size_t len) -> uint64_t {
+[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, std::size_t len) -> std::uint64_t {
     static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f),
                                               UINT64_C(0xe7037ed1a0b428db),
                                               UINT64_C(0x8ebc6af09c88c6e3),
                                               UINT64_C(0x589965cc75374cc3)};
 
     auto const* p = static_cast<uint8_t const*>(key);
-    uint64_t seed = secret[0];
-    uint64_t a{};
-    uint64_t b{};
+    std::uint64_t seed = secret[0];
+    std::uint64_t a{};
+    std::uint64_t b{};
     if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) ANKERL_UNORDERED_DENSE_LIKELY_ATTR {
         if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4))
@@ -285,11 +236,11 @@ inline void mum(uint64_t* a, uint64_t* b) {
             }
         }
     } else {
-        size_t i = len;
+        std::size_t i = len;
         if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) ANKERL_UNORDERED_DENSE_UNLIKELY_ATTR {
-            uint64_t see1 = seed;
-            uint64_t see2 = seed;
+            std::uint64_t see1 = seed;
+            std::uint64_t see2 = seed;
             do {
                 seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
                 see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1);
@@ -312,16 +263,16 @@ inline void mum(uint64_t* a, uint64_t* b) {
     return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed));
 }
 
-[[nodiscard]] inline auto hash(uint64_t x) -> uint64_t {
+[[nodiscard]] inline auto hash(std::uint64_t x) -> std::uint64_t {
     return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15));
 }
 
 } // namespace detail::wyhash
 
-ANKERL_UNORDERED_DENSE_EXPORT template <typename T, typename Enable = void>
+template <typename T, typename Enable = void>
 struct hash {
     auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
-        -> uint64_t {
+        -> std::uint64_t {
         return std::hash<T>{}(obj);
     }
 };
 
@@ -330,7 +281,7 @@ template <typename T>
 struct hash<T, typename std::hash<T>::is_avalanching> {
     using is_avalanching = void;
     auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
-        -> uint64_t {
+        -> std::uint64_t {
         return std::hash<T>{}(obj);
     }
 };
@@ -338,7 +289,7 @@ struct hash<T, typename std::hash<T>::is_avalanching> {
 
 template <typename CharT>
 struct hash<std::basic_string<CharT>> {
     using is_avalanching = void;
-    auto operator()(std::basic_string<CharT> const& str) const noexcept -> uint64_t {
+    auto operator()(std::basic_string<CharT> const& str) const noexcept -> std::uint64_t {
         return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size());
     }
 };
@@ -346,7 +297,7 @@ struct hash<std::basic_string<CharT>> {
 
 template <typename CharT>
 struct hash<std::basic_string_view<CharT>> {
     using is_avalanching = void;
-    auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> uint64_t {
+    auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> std::uint64_t {
         return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size());
     }
 };
@@ -354,34 +305,34 @@ struct hash<std::basic_string_view<CharT>> {
 
 template <class T>
 struct hash<T*> {
     using is_avalanching = void;
-    auto operator()(T* ptr) const noexcept -> uint64_t {
+    auto operator()(T* ptr) const noexcept -> std::uint64_t {
         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr));
+        return detail::wyhash::hash(reinterpret_cast<std::uintptr_t>(ptr));
     }
 };
 
 template <class T>
 struct hash<std::unique_ptr<T>> {
     using is_avalanching = void;
-    auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> uint64_t {
+    auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> std::uint64_t {
         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+        return detail::wyhash::hash(reinterpret_cast<std::uintptr_t>(ptr.get()));
     }
 };
 
 template <class T>
 struct hash<std::shared_ptr<T>> {
     using is_avalanching = void;
-    auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> uint64_t {
+    auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> std::uint64_t {
         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+        return detail::wyhash::hash(reinterpret_cast<std::uintptr_t>(ptr.get()));
     }
 };
 
 template <typename Enum>
 struct hash<Enum, typename std::enable_if_t<std::is_enum_v<Enum>>> {
     using is_avalanching = void;
-    auto operator()(Enum e) const noexcept -> uint64_t {
+    auto operator()(Enum e) const noexcept -> std::uint64_t {
         using underlying = std::underlying_type_t<Enum>;
         return detail::wyhash::hash(static_cast<underlying>(e));
     }
 };
@@ -392,25 +343,26 @@ struct tuple_hash_helper {
     // Converts the value into 64bit. If it is an integral type, just cast it. Mixing is doing the rest.
     // If it isn't an integral we need to hash it.
     template <typename Arg>
-    [[nodiscard]] constexpr static auto to64(Arg const& arg) -> uint64_t {
+    [[nodiscard]] constexpr static auto to64(Arg const& arg) -> std::uint64_t {
         if constexpr (std::is_integral_v<Arg> || std::is_enum_v<Arg>) {
-            return static_cast<uint64_t>(arg);
+            return static_cast<std::uint64_t>(arg);
         } else {
             return hash<Arg>{}(arg);
         }
     }
 
-    [[nodiscard]] ANKERL_UNORDERED_DENSE_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK static auto mix64(uint64_t state, uint64_t v)
-        -> uint64_t {
-        return detail::wyhash::mix(state + v, uint64_t{0x9ddfea08eb382d69});
+    [[nodiscard]] ANKERL_UNORDERED_DENSE_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK static auto mix64(std::uint64_t state,
+                                                                                                std::uint64_t v)
+        -> std::uint64_t {
+        return detail::wyhash::mix(state + v, std::uint64_t{0x9ddfea08eb382d69});
     }
 
     // Creates a buffer that holds all the data from each element of the tuple. If possible we memcpy the data directly. If
    // not, we hash the object and use this for the array. Size of the array is known at compile time, and memcpy is optimized
     // away, so filling the buffer is highly efficient. Finally, call wyhash with this buffer.
     template <typename T, std::size_t... Idx>
-    [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence<Idx...> /*unused*/) noexcept -> uint64_t {
-        auto h = uint64_t{};
+    [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence<Idx...> /*unused*/) noexcept -> std::uint64_t {
+        auto h = std::uint64_t{};
         ((h = mix64(h, to64(std::get<Idx>(t)))), ...);
         return h;
     }
@@ -419,7 +371,7 @@ struct tuple_hash_helper {
 template <typename... Args>
 struct hash<std::tuple<Args...>> : tuple_hash_helper<Args...> {
     using is_avalanching = void;
-    auto operator()(std::tuple<Args...> const& t) const noexcept -> uint64_t {
+    auto operator()(std::tuple<Args...> const& t) const noexcept -> std::uint64_t {
         return tuple_hash_helper<Args...>::calc_hash(t, std::index_sequence_for<Args...>{});
     }
 };
@@ -427,19 +379,19 @@ struct hash<std::tuple<Args...>> : tuple_hash_helper<Args...> {
 
 template <typename A, typename B>
 struct hash<std::pair<A, B>> : tuple_hash_helper<A, B> {
     using is_avalanching = void;
-    auto operator()(std::pair<A, B> const& t) const noexcept -> uint64_t {
+    auto operator()(std::pair<A, B> const& t) const noexcept -> std::uint64_t {
         return tuple_hash_helper<A, B>::calc_hash(t, std::index_sequence_for<A, B>{});
     }
 };
 
 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
-# define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T)                    \
-     template <>                                                      \
-     struct hash<T> {                                                 \
-         using is_avalanching = void;                                 \
-         auto operator()(T const& obj) const noexcept -> uint64_t {   \
-             return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
-         }                                                            \
+# define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T)                         \
+     template <>                                                           \
+     struct hash<T> {                                                      \
+         using is_avalanching = void;                                      \
+         auto operator()(T const& obj) const noexcept -> std::uint64_t {   \
+             return detail::wyhash::hash(static_cast<std::uint64_t>(obj)); \
+         }                                                                 \
     }
 
 # if defined(__GNUC__) && !defined(__clang__)
@@ -475,19 +427,19 @@ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);
 namespace bucket_type {
 
 struct standard {
-    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
-    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
+    static constexpr std::uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
+    static constexpr std::uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
 
-    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
-    uint32_t m_value_idx;            // index into the m_values vector.
+    std::uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
+    std::uint32_t m_value_idx;            // index into the m_values vector.
 };
 
 ANKERL_UNORDERED_DENSE_PACK(struct big {
-    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
-    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
+    static constexpr std::uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
+    static constexpr std::uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
 
-    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
-    size_t m_value_idx;              // index into the m_values vector.
+    std::uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
+    std::size_t m_value_idx;              // index into the m_values vector.
 });
 
 } // namespace bucket_type
 
@@ -525,7 +477,7 @@ template <typename T>
 using detect_iterator = typename T::iterator;
 
 template <typename T>
-using detect_reserve = decltype(std::declval<T&>().reserve(size_t{}));
+using detect_reserve = decltype(std::declval<T&>().reserve(std::size_t{}));
 
 // enable_if helpers
 
@@ -559,7 +511,7 @@ struct base_table_type_set {};
 // It allocates blocks of equal size and puts them into the m_blocks vector. That means it can grow simply by adding a new
 // block to the back of m_blocks, and doesn't double its size like an std::vector. The disadvantage is that memory is not
 // linear and thus there is one more indirection necessary for indexing.
-template <typename T, typename Allocator = std::allocator<T>, size_t MaxSegmentSizeBytes = 4096>
+template <typename T, typename Allocator = std::allocator<T>, std::size_t MaxSegmentSizeBytes = 4096>
 class segmented_vector {
     template <bool IsConst>
     class iter_t;
@@ -579,11 +531,11 @@ public:
 private:
     using vec_alloc = typename std::allocator_traits<Allocator>::template rebind_alloc<pointer>;
     std::vector<pointer, vec_alloc> m_blocks{};
-    size_t m_size{};
+    std::size_t m_size{};
 
     // Calculates the maximum number for x in (s << x) <= max_val
-    static constexpr auto num_bits_closest(size_t max_val, size_t s) -> size_t {
-        auto f = size_t{0};
+    static constexpr auto num_bits_closest(std::size_t max_val, std::size_t s) -> std::size_t {
+        auto f = std::size_t{0};
         while (s << (f + 1) <= max_val) {
             ++f;
         }
@@ -602,7 +554,7 @@ private:
     class iter_t {
         using ptr_t = std::conditional_t<IsConst, segmented_vector::const_pointer, segmented_vector::pointer>;
         ptr_t m_data{};
-        size_t m_idx{};
+        std::size_t m_idx{};
 
         template <bool B>
         friend class iter_t;
@@ -622,7 +574,7 @@ private:
             : m_data(other.m_data)
             , m_idx(other.m_idx) {}
 
-        constexpr iter_t(ptr_t data, size_t idx) noexcept
+        constexpr iter_t(ptr_t data, std::size_t idx) noexcept
             : m_data(data)
             , m_idx(idx) {}
@@ -656,7 +608,7 @@ private:
         }
 
         [[nodiscard]] constexpr auto operator+(difference_type diff) const noexcept -> iter_t {
-            return {m_data, static_cast<size_t>(static_cast<difference_type>(m_idx) + diff)};
+            return {m_data, static_cast<std::size_t>(static_cast<difference_type>(m_idx) + diff)};
         }
 
         constexpr auto operator+=(difference_type diff) noexcept -> iter_t& {
@@ -747,7 +699,7 @@ private:
         }
     }
 
-    [[nodiscard]] static constexpr auto calc_num_blocks_for_capacity(size_t capacity) {
+    [[nodiscard]] static constexpr auto calc_num_blocks_for_capacity(std::size_t capacity) {
         return (capacity + num_elements_in_block - 1U) / num_elements_in_block;
     }
@@ -812,20 +764,20 @@ public:
         dealloc();
     }
 
-    [[nodiscard]] constexpr auto size() const -> size_t {
+    [[nodiscard]] constexpr auto size() const -> std::size_t {
         return m_size;
     }
 
-    [[nodiscard]] constexpr auto capacity() const -> size_t {
+    [[nodiscard]] constexpr auto capacity() const -> std::size_t {
         return m_blocks.size() * num_elements_in_block;
     }
 
     // Indexing is highly performance critical
-    [[nodiscard]] constexpr auto operator[](size_t i) const noexcept -> T const& {
+    [[nodiscard]] constexpr auto operator[](std::size_t i) const noexcept -> T const& {
         return m_blocks[i >> num_bits][i & mask];
     }
 
-    [[nodiscard]] constexpr auto operator[](size_t i) noexcept -> T& {
+    [[nodiscard]] constexpr auto operator[](std::size_t i) noexcept -> T& {
         return m_blocks[i >> num_bits][i & mask];
     }
@@ -865,7 +817,7 @@ public:
         return 0 == m_size;
     }
 
-    void reserve(size_t new_capacity) {
+    void reserve(std::size_t new_capacity) {
         m_blocks.reserve(calc_num_blocks_for_capacity(new_capacity));
         while (new_capacity > capacity()) {
             increase_capacity();
@@ -913,7 +865,7 @@ public:
 
     void clear() {
         if constexpr (!std::is_trivially_destructible_v<T>) {
-            for (size_t i = 0, s = size(); i < s; ++i) {
+            for (std::size_t i = 0, s = size(); i < s; ++i) {
                 operator[](i).~T();
             }
         }
@@ -962,7 +914,7 @@ private:
                                                 default_bucket_container_type,
                                                 BucketContainer>;
 
-    static constexpr uint8_t initial_shifts = 64 - 2; // 2^(64-m_shift) number of buckets
+    static constexpr std::uint8_t initial_shifts = 64 - 2; // 2^(64-m_shift) number of buckets
    static constexpr float default_max_load_factor = 0.8F;
 
 public:
@@ -990,11 +942,11 @@ private:
     value_container_type m_values{}; // Contains all the key-value pairs in one densely stored container. No holes.
     bucket_container_type m_buckets{};
-    size_t m_max_bucket_capacity = 0;
+    std::size_t m_max_bucket_capacity = 0;
     float m_max_load_factor = default_max_load_factor;
     Hash m_hash{};
     KeyEqual m_equal{};
-    uint8_t m_shifts = initial_shifts;
+    std::uint8_t m_shifts = initial_shifts;
 
     [[nodiscard]] auto next(value_idx_type bucket_idx) const -> value_idx_type {
         if (ANKERL_UNORDERED_DENSE_UNLIKELY(bucket_idx + 1U == bucket_count()))
@@ -1006,15 +958,15 @@ private:
     }
 
     // Helper to access bucket through pointer types
-    [[nodiscard]] static constexpr auto at(bucket_container_type& bucket, size_t offset) -> Bucket& {
+    [[nodiscard]] static constexpr auto at(bucket_container_type& bucket, std::size_t offset) -> Bucket& {
         return bucket[offset];
     }
 
-    [[nodiscard]] static constexpr auto at(const bucket_container_type& bucket, size_t offset) -> const Bucket& {
+    [[nodiscard]] static constexpr auto at(const bucket_container_type& bucket, std::size_t offset) -> const Bucket& {
         return bucket[offset];
     }
 
-    // use the dist_inc and dist_dec functions so that uint16_t types work without warning
+    // use the dist_inc and dist_dec functions so that std::uint16_t types work without warning
     [[nodiscard]] static constexpr auto dist_inc(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
         return static_cast<dist_and_fingerprint_type>(x + Bucket::dist_inc);
     }
@@ -1025,10 +977,10 @@ private:
     // The goal of mixed_hash is to always produce a high quality 64bit hash.
     template <typename K>
-    [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> uint64_t {
+    [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> std::uint64_t {
         if constexpr (is_detected_v<detect_avalanching, Hash>) {
             // we know that the hash is good because is_avalanching.
-            if constexpr (sizeof(decltype(m_hash(key))) < sizeof(uint64_t)) {
+            if constexpr (sizeof(decltype(m_hash(key))) < sizeof(std::uint64_t)) {
                 // 32bit hash and is_avalanching => multiply with a constant to avalanche bits upwards
                 return m_hash(key) * UINT64_C(0x9ddfea08eb382d69);
             } else {
@@ -1041,11 +993,11 @@ private:
         }
     }
 
-    [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash) const -> dist_and_fingerprint_type {
+    [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(std::uint64_t hash) const -> dist_and_fingerprint_type {
         return Bucket::dist_inc | (static_cast<dist_and_fingerprint_type>(hash) & Bucket::fingerprint_mask);
     }
 
-    [[nodiscard]] constexpr auto bucket_idx_from_hash(uint64_t hash) const -> value_idx_type {
+    [[nodiscard]] constexpr auto bucket_idx_from_hash(std::uint64_t hash) const -> value_idx_type {
         return static_cast<value_idx_type>(hash >> m_shifts);
     }
@@ -1079,13 +1031,24 @@ private:
         at(m_buckets, place) = bucket;
     }
 
-    [[nodiscard]] static constexpr auto calc_num_buckets(uint8_t shifts) -> size_t {
-        return (std::min)(max_bucket_count(), size_t{1} << (64U - shifts));
+    void erase_and_shift_down(value_idx_type bucket_idx) {
+        // shift down until either empty or an element with correct spot is found
+        auto next_bucket_idx = next(bucket_idx);
+        while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
+            auto& next_bucket = at(m_buckets, next_bucket_idx);
+            at(m_buckets, bucket_idx) = {dist_dec(next_bucket.m_dist_and_fingerprint), next_bucket.m_value_idx};
+            bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
+        }
+        at(m_buckets, bucket_idx) = {};
+    }
+
+    [[nodiscard]] static constexpr auto calc_num_buckets(std::uint8_t shifts) -> std::size_t {
+        return (std::min)(max_bucket_count(), std::size_t{1} << (64U - shifts));
     }
 
-    [[nodiscard]] constexpr auto calc_shifts_for_size(size_t s) const -> uint8_t {
+    [[nodiscard]] constexpr auto calc_shifts_for_size(std::size_t s) const -> std::uint8_t {
         auto shifts = initial_shifts;
-        while (shifts > 0 && static_cast<size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
+        while (shifts > 0 &&
+               static_cast<std::size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
             --shifts;
         }
         return shifts;
@@ -1130,7 +1093,7 @@ private:
             if constexpr (has_reserve) {
                 m_buckets.reserve(num_buckets);
             }
-            for (size_t i = m_buckets.size(); i < num_buckets; ++i) {
+            for (std::size_t i = m_buckets.size(); i < num_buckets; ++i) {
                 m_buckets.emplace_back();
             }
         } else {
@@ -1183,15 +1146,7 @@ private:
     template <typename Op>
     void do_erase(value_idx_type bucket_idx, Op handle_erased_value) {
         auto const value_idx_to_remove = at(m_buckets, bucket_idx).m_value_idx;
-
-        // shift down until either empty or an element with correct spot is found
-        auto next_bucket_idx = next(bucket_idx);
-        while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
-            at(m_buckets, bucket_idx) = {dist_dec(at(m_buckets, next_bucket_idx).m_dist_and_fingerprint),
-                                         at(m_buckets, next_bucket_idx).m_value_idx};
-            bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
-        }
-        at(m_buckets, bucket_idx) = {};
+        erase_and_shift_down(bucket_idx);
         handle_erased_value(std::move(m_values[value_idx_to_remove]));
 
         // update m_values
@@ -1201,9 +1156,7 @@ private:
             val = std::move(m_values.back());
 
            // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx
-            auto mh = mixed_hash(get_key(val));
-            bucket_idx = bucket_idx_from_hash(mh);
-
+            bucket_idx = bucket_idx_from_hash(mixed_hash(get_key(val)));
             auto const values_idx_back = static_cast<value_idx_type>(m_values.size() - 1);
             while (values_idx_back != at(m_buckets, bucket_idx).m_value_idx) {
                 bucket_idx = next(bucket_idx);
@@ -1214,7 +1167,7 @@ private:
     }
 
     template <class K, class Op>
-    auto do_erase_key(K&& key, Op handle_erased_value) -> size_t { // NOLINT(cppcoreguidelines-missing-std-forward)
+    auto do_erase_key(K&& key, Op handle_erased_value) -> std::size_t { // NOLINT(cppcoreguidelines-missing-std-forward)
         if (empty()) {
             return 0;
         }
@@ -1348,7 +1301,7 @@ private:
     }
 
 public:
-    explicit table(size_t bucket_count,
+    explicit table(std::size_t bucket_count,
                    Hash const& hash = Hash(),
                    KeyEqual const& equal = KeyEqual(),
                    allocator_type const& alloc_or_container = allocator_type())
@@ -1367,10 +1320,10 @@ public:
     table()
         : table(0) {}
 
-    table(size_t bucket_count, allocator_type const& alloc)
+    table(std::size_t bucket_count, allocator_type const& alloc)
         : table(bucket_count, Hash(), KeyEqual(), alloc) {}
 
-    table(size_t bucket_count, Hash const& hash, allocator_type const& alloc)
+    table(std::size_t bucket_count, Hash const& hash, allocator_type const& alloc)
         : table(bucket_count, hash, KeyEqual(), alloc) {}
 
     explicit table(allocator_type const& alloc)
@@ -1415,7 +1368,7 @@ public:
     }
 
     table(std::initializer_list<value_type> ilist,
-          size_t bucket_count = 0,
+          std::size_t bucket_count = 0,
           Hash const& hash = Hash(),
           KeyEqual const& equal = KeyEqual(),
           allocator_type const& alloc = allocator_type())
@@ -1522,15 +1475,15 @@ public:
         return m_values.empty();
     }
 
-    [[nodiscard]] auto size() const noexcept -> size_t {
+    [[nodiscard]] auto size() const noexcept -> std::size_t {
         return m_values.size();
     }
 
-    [[nodiscard]] static constexpr auto max_size() noexcept -> size_t {
-        if constexpr ((std::numeric_limits<value_idx_type>::max)() == (std::numeric_limits<size_t>::max)()) {
-            return size_t{1} << (sizeof(value_idx_type) * 8 - 1);
+    [[nodiscard]] static constexpr auto max_size() noexcept -> std::size_t {
+        if constexpr ((std::numeric_limits<value_idx_type>::max)() == (std::numeric_limits<std::size_t>::max)()) {
+            return std::size_t{1} << (sizeof(value_idx_type) * 8 - 1);
         } else {
-            return size_t{1} << (sizeof(value_idx_type) * 8);
+            return std::size_t{1} << (sizeof(value_idx_type) * 8);
         }
     }
@@ -1787,6 +1740,59 @@ public:
         return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
     }
 
+    // Replaces the key at the given iterator with new_key. This does not change any other data in the underlying table, so
+    // all iterators and references remain valid. However, this operation can fail if new_key already exists in the table.
+    // In that case, returns {iterator to the already existing new_key, false} and no change is made.
+    //
+    // In the case of a set, this effectively removes the old key and inserts the new key at the same spot, which is more
+    // efficient than removing the old key and inserting the new key because it avoids repositioning the last element.
+    template <typename K>
+    auto replace_key(iterator it, K&& new_key) -> std::pair<iterator, bool> {
+        auto const new_key_hash = mixed_hash(new_key);
+
+        // first, check if new_key already exists and return if so
+        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(new_key_hash);
+        auto bucket_idx = bucket_idx_from_hash(new_key_hash);
+        while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+            auto const& bucket = at(m_buckets, bucket_idx);
+            if (dist_and_fingerprint == bucket.m_dist_and_fingerprint &&
+                m_equal(new_key, get_key(m_values[bucket.m_value_idx]))) {
+                return {begin() + static_cast<difference_type>(bucket.m_value_idx), false};
+            }
+            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+            bucket_idx = next(bucket_idx);
+        }
+
+        // const_cast is needed because iterator for the set is always const, so adding another get_key overload is not
+        // feasible.
+        auto& target_key = const_cast<key_type&>(get_key(*it));
+        auto const old_key_bucket_idx = bucket_idx_from_hash(mixed_hash(target_key));
+
+        // Replace the key before doing any bucket changes. If it throws, no harm done, we are still in a valid state as we
+        // have not modified any buckets yet.
+        target_key = std::forward<K>(new_key);
+
+        auto const value_idx = static_cast<value_idx_type>(it - begin());
+
+        // Find the bucket containing our value_idx. It's guaranteed we find it, so no other stopping condition needed.
+        bucket_idx = old_key_bucket_idx;
+        while (value_idx != at(m_buckets, bucket_idx).m_value_idx) {
+            bucket_idx = next(bucket_idx);
+        }
+        erase_and_shift_down(bucket_idx);
+
+        // place the new bucket
+        dist_and_fingerprint = dist_and_fingerprint_from_hash(new_key_hash);
+        bucket_idx = bucket_idx_from_hash(new_key_hash);
+        while (dist_and_fingerprint < at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+            bucket_idx = next(bucket_idx);
+        }
+        place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+
+        return {it, true};
+    }
+
     auto erase(iterator it) -> iterator {
         auto hash = mixed_hash(get_key(*it));
         auto bucket_idx = bucket_idx_from_hash(hash);
@@ -1851,7 +1857,7 @@ public:
         return begin() + idx_first;
     }
 
-    auto erase(Key const& key) -> size_t {
+    auto erase(Key const& key) -> std::size_t {
         return do_erase_key(key, [](value_type const& /*unused*/) {
         });
     }
@@ -1865,7 +1871,7 @@ public:
     }
 
     template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
-    auto erase(K&& key) -> size_t {
+    auto erase(K&& key) -> std::size_t {
         return do_erase_key(std::forward<K>(key), [](value_type const& /*unused*/) {
         });
     }
@@ -1934,12 +1940,12 @@ public:
         return try_emplace(std::forward<K>(key)).first->second;
     }
 
-    auto count(Key const& key) const -> size_t {
+    auto count(Key const& key) const -> std::size_t {
         return find(key) == end() ? 0 : 1;
     }
 
     template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
-    auto count(K const& key) const -> size_t {
+    auto count(K const& key) const -> std::size_t {
         return find(key) == end() ? 0 : 1;
     }
@@ -1994,11 +2000,11 @@ public:
 
     // bucket interface ///////////////////////////////////////////////////////
 
-    auto bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
+    auto bucket_count() const noexcept -> std::size_t { // NOLINT(modernize-use-nodiscard)
         return m_buckets.size();
     }
 
-    static constexpr auto max_bucket_count() noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
+    static constexpr auto max_bucket_count() noexcept -> std::size_t { // NOLINT(modernize-use-nodiscard)
         return max_size();
     }
@@ -2019,7 +2025,7 @@ public:
         }
     }
 
-    void rehash(size_t count) {
+    void rehash(std::size_t count) {
         count = (std::min)(count, max_size());
         auto shifts = calc_shifts_for_size((std::max)(count, size()));
         if (shifts != m_shifts) {
@@ -2031,7 +2037,7 @@ public:
         }
     }
 
-    void reserve(size_t capa) {
+    void reserve(std::size_t capa) {
         capa = (std::min)(capa, max_size());
         if constexpr (has_reserve) {
             // std::deque doesn't have reserve(). Make sure we only call when available
@@ -2094,49 +2100,49 @@
 } // namespace detail
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
-                                        class Bucket = bucket_type::standard,
-                                        class BucketContainer = detail::default_container_t>
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
+          class Bucket = bucket_type::standard,
+          class BucketContainer = detail::default_container_t>
 using map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, false>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
-                                        class Bucket = bucket_type::standard,
-                                        class BucketContainer = detail::default_container_t>
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
+          class Bucket = bucket_type::standard,
+          class BucketContainer = detail::default_container_t>
 using segmented_map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, true>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class AllocatorOrContainer = std::allocator<Key>,
-                                        class Bucket = bucket_type::standard,
-                                        class BucketContainer = detail::default_container_t>
+template <class Key,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<Key>,
+          class Bucket = bucket_type::standard,
+          class BucketContainer = detail::default_container_t>
 using set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, false>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class AllocatorOrContainer = std::allocator<Key>,
-                                        class Bucket = bucket_type::standard,
-                                        class BucketContainer = detail::default_container_t>
+template <class Key,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<Key>,
+          class Bucket = bucket_type::standard,
+          class BucketContainer = detail::default_container_t>
 using segmented_set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, true>;
 
 # if defined(ANKERL_UNORDERED_DENSE_PMR)
 
 namespace pmr {
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class Bucket = bucket_type::standard>
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class Bucket = bucket_type::standard>
 using map = detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<std::pair<Key, T>>, Bucket, detail::default_container_t, false>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class Bucket = bucket_type::standard>
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class Bucket = bucket_type::standard>
 using segmented_map = detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<std::pair<Key, T>>, Bucket, detail::default_container_t, true>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class Bucket = bucket_type::standard>
+template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>, class Bucket = bucket_type::standard>
 using set =
     detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<Key>, Bucket, detail::default_container_t, false>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class Bucket = bucket_type::standard>
+template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>, class Bucket = bucket_type::standard>
 using segmented_set =
     detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<Key>, Bucket, detail::default_container_t, true>;
 
 } // namespace pmr
 
 # endif
 
 } // namespace ankerl::unordered_dense
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash,
-                                        class KeyEqual,
-                                        class AllocatorOrContainer,
-                                        class Bucket,
-                                        class Pred,
-                                        class BucketContainer,
-                                        bool IsSegmented>
+template <class Key,
+          class T,
+          class Hash,
+          class KeyEqual,
+          class AllocatorOrContainer,
+          class Bucket,
+          class Pred,
+          class BucketContainer,
+          bool IsSegmented>
 // NOLINTNEXTLINE(cert-dcl58-cpp)
 auto erase_if(
     ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, IsSegmented>& map,
-    Pred pred) -> size_t {
+    Pred pred) -> std::size_t {
     using map_t = ankerl::unordered_dense::detail::
         table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, IsSegmented>;
 
diff --git a/gdbsupport/unordered_map.h b/gdbsupport/unordered_map.h
index 10c0c512cb9..60644030953 100644
--- a/gdbsupport/unordered_map.h
+++ b/gdbsupport/unordered_map.h
@@ -18,7 +18,7 @@
 #ifndef GDBSUPPORT_UNORDERED_MAP_H
 #define GDBSUPPORT_UNORDERED_MAP_H
 
-#include "unordered_dense.h"
+#include "unordered_dense/unordered_dense.h"
 
 namespace gdb
 {
diff --git a/gdbsupport/unordered_set.h b/gdbsupport/unordered_set.h
index aaa4579ffd0..3020fd70e01 100644
--- a/gdbsupport/unordered_set.h
+++ b/gdbsupport/unordered_set.h
@@ -18,7 +18,7 @@
 #ifndef GDBSUPPORT_UNORDERED_SET_H
 #define GDBSUPPORT_UNORDERED_SET_H
 
-#include "unordered_dense.h"
+#include "unordered_dense/unordered_dense.h"
 
 namespace gdb
 {