# void std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >::_M_construct<bool>(char const*, size_t)
# and wide char version
_ZNSt7__cxx1112basic_stringI[cw]St11char_traitsI[cw]ESaI[cw]EE12_M_constructILb[01]EEEvPK[cw][jmy];
+
} GLIBCXX_3.4.33;
+# GCC 16.1.0
+GLIBCXX_3.4.35 {
+
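+    # Implementation of C++20 atomic waiting/notifying, moved into the library.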
+ _ZNSt8__detail11__wait_implEPKvRNS_16__wait_args_baseE;
+ _ZNSt8__detail13__notify_implEPKvbRKNS_16__wait_args_baseE;
+ _ZNSt8__detail17__wait_until_implEPKvRNS_16__wait_args_baseERKNSt6chrono8durationI[lx]St5ratioIL[lx]1EL[lx]1000000000EEEE;
+ _ZNSt8__detail11__wait_args22_M_load_proxy_wait_valEPKv;
+
+} GLIBCXX_3.4.34;
+
# Symbols in the support library (libsupc++) have their own tag.
CXXABI_1.3 {
#include <bits/atomic_wait.h>
#if __glibcxx_atomic_wait
-#include <bits/functional_hash.h>
#include <bits/this_thread_sleep.h>
#include <bits/chrono.h>
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
#define _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT
- // returns true if wait ended before timeout
- bool
- __platform_wait_until(const __platform_wait_t* __addr,
- __platform_wait_t __old,
- const __wait_clock_t::time_point& __atime) noexcept
- {
- auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
- auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
-
- struct timespec __rt =
- {
- static_cast<std::time_t>(__s.time_since_epoch().count()),
- static_cast<long>(__ns.count())
- };
-
- auto __e = syscall (SYS_futex, __addr,
- static_cast<int>(__futex_wait_flags::__wait_bitset_private),
- __old, &__rt, nullptr,
- static_cast<int>(__futex_wait_flags::__bitset_match_any));
- if (__e)
- {
- if (errno == ETIMEDOUT)
- return false;
- if (errno != EINTR && errno != EAGAIN)
- __throw_system_error(errno);
- }
- return true;
- }
#else
// define _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT and implement __platform_wait_until
// if there is a more efficient primitive supported by the platform
// (e.g. __ulock_wait) which is better than pthread_cond_clockwait.
#endif // ! HAVE_LINUX_FUTEX
-#ifdef _GLIBCXX_HAS_GTHREADS
- // Returns true if wait ended before timeout.
- inline bool
- __cond_wait_until(__condvar& __cv, mutex& __mx,
- const __wait_clock_t::time_point& __atime)
- {
- auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
- auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
-
- __gthread_time_t __ts =
- {
- static_cast<std::time_t>(__s.time_since_epoch().count()),
- static_cast<long>(__ns.count())
- };
-
-#ifdef _GLIBCXX_USE_PTHREAD_COND_CLOCKWAIT
- if constexpr (is_same_v<chrono::steady_clock, __wait_clock_t>)
- __cv.wait_until(__mx, CLOCK_MONOTONIC, __ts);
- else
-#endif
- __cv.wait_until(__mx, __ts);
- return __wait_clock_t::now() < __atime;
- }
-#endif // _GLIBCXX_HAS_GTHREADS
-
- inline __wait_result_type
- __spin_until_impl(const __platform_wait_t* __addr,
- const __wait_args_base& __args,
- const __wait_clock_t::time_point& __deadline)
- {
- auto __t0 = __wait_clock_t::now();
- using namespace literals::chrono_literals;
-
- __platform_wait_t __val{};
- auto __now = __wait_clock_t::now();
- for (; __now < __deadline; __now = __wait_clock_t::now())
- {
- auto __elapsed = __now - __t0;
-#ifndef _GLIBCXX_NO_SLEEP
- if (__elapsed > 128ms)
- this_thread::sleep_for(64ms);
- else if (__elapsed > 64us)
- this_thread::sleep_for(__elapsed / 2);
- else
-#endif
- if (__elapsed > 4us)
- __thread_yield();
- else if (auto __res = __detail::__spin_impl(__addr, __args); __res.first)
- return __res;
-
- __atomic_load(__addr, &__val, __args._M_order);
- if (__val != __args._M_old)
- return { true, __val };
- }
- return { false, __val };
- }
-
- inline __wait_result_type
- __wait_until_impl(const void* __addr, const __wait_args_base& __a,
- const __wait_clock_t::time_point& __atime)
- {
- __wait_args_base __args = __a;
- __waitable_state* __state = nullptr;
- const __platform_wait_t* __wait_addr;
- if (__args & __wait_flags::__proxy_wait)
- {
- __state = &__waitable_state::_S_state_for(__addr);
- __wait_addr = &__state->_M_ver;
- __atomic_load(__wait_addr, &__args._M_old, __args._M_order);
- }
- else
- __wait_addr = static_cast<const __platform_wait_t*>(__addr);
-
- if (__args & __wait_flags::__do_spin)
- {
- auto __res = __detail::__spin_until_impl(__wait_addr, __args, __atime);
- if (__res.first)
- return __res;
- if (__args & __wait_flags::__spin_only)
- return __res;
- }
-
- auto __tracker = __waitable_state::_S_track(__state, __args, __addr);
-
-#ifdef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT
- if (__platform_wait_until(__wait_addr, __args._M_old, __atime))
- return { true, __args._M_old };
- else
- return { false, __args._M_old };
-#else
- __platform_wait_t __val{};
- __atomic_load(__wait_addr, &__val, __args._M_order);
- if (__val == __args._M_old)
- {
- if (!__state)
- __state = &__waitable_state::_S_state_for(__addr);
- lock_guard<mutex> __l{ __state->_M_mtx };
- __atomic_load(__wait_addr, &__val, __args._M_order);
- if (__val == __args._M_old
- && __cond_wait_until(__state->_M_cv, __state->_M_mtx, __atime))
- return { true, __val };
- }
- return { false, __val };
-#endif
- }
+ __wait_result_type
+ __wait_until_impl(const void* __addr, __wait_args_base& __args,
+ const __wait_clock_t::duration& __atime);
// Returns {true, val} if wait ended before a timeout.
template<typename _Clock, typename _Dur>
__wait_result_type
- __wait_until(const void* __addr, const __wait_args_base& __args,
+ __wait_until(const void* __addr, __wait_args_base& __args,
const chrono::time_point<_Clock, _Dur>& __atime) noexcept
{
auto __at = __detail::__to_wait_clock(__atime);
- auto __res = __detail::__wait_until_impl(__addr, __args, __at);
+ auto __res = __detail::__wait_until_impl(__addr, __args,
+ __at.time_since_epoch());
if constexpr (!is_same_v<__wait_clock_t, _Clock>)
if (!__res.first)
// Returns {true, val} if wait ended before a timeout.
template<typename _Rep, typename _Period>
__wait_result_type
- __wait_for(const void* __addr, const __wait_args_base& __args,
+ __wait_for(const void* __addr, __wait_args_base& __args,
const chrono::duration<_Rep, _Period>& __rtime) noexcept
{
if (!__rtime.count())
{
- __wait_args_base __a = __args;
// no rtime supplied, just spin a bit
- __a._M_flags |= __wait_flags::__do_spin | __wait_flags::__spin_only;
- return __detail::__wait_impl(__addr, __a);
+ __args._M_flags |= __wait_flags::__do_spin | __wait_flags::__spin_only;
+ return __detail::__wait_impl(__addr, __args);
}
auto const __reltime = chrono::ceil<__wait_clock_t::duration>(__rtime);
bool __bare_wait = false) noexcept
{
__detail::__wait_args __args{ __addr, __bare_wait };
- _Tp __val = __vfn();
+ _Tp __val = __args._M_prep_for_wait_on(__addr, __vfn);
while (!__pred(__val))
{
auto __res = __detail::__wait_until(__addr, __args, __atime);
if (!__res.first)
// timed out
return __res.first; // C++26 will also return last observed __val
- __val = __vfn();
+ __val = __args._M_prep_for_wait_on(__addr, __vfn);
}
return true; // C++26 will also return last observed __val
}
template<typename _Tp, typename _ValFn,
typename _Clock, typename _Dur>
bool
- __atomic_wait_address_until_v(const _Tp* __addr, _Tp&& __old, _ValFn&& __vfn,
+ __atomic_wait_address_until_v(const _Tp* __addr, _Tp&& __old,
+ _ValFn&& __vfn,
const chrono::time_point<_Clock, _Dur>& __atime,
bool __bare_wait = false) noexcept
{
auto __pfn = [&](const _Tp& __val) {
return !__detail::__atomic_eq(__old, __val);
};
- return __atomic_wait_address_until(__addr, __pfn, forward<_ValFn>(__vfn),
- __atime, __bare_wait);
+ return std::__atomic_wait_address_until(__addr, __pfn, __vfn, __atime,
+ __bare_wait);
}
template<typename _Tp,
bool __bare_wait = false) noexcept
{
__detail::__wait_args __args{ __addr, __bare_wait };
- _Tp __val = __vfn();
+ _Tp __val = __args._M_prep_for_wait_on(__addr, __vfn);
while (!__pred(__val))
{
auto __res = __detail::__wait_for(__addr, __args, __rtime);
if (!__res.first)
// timed out
return __res.first; // C++26 will also return last observed __val
- __val = __vfn();
+ __val = __args._M_prep_for_wait_on(__addr, __vfn);
}
return true; // C++26 will also return last observed __val
}
#include <bits/version.h>
#if __glibcxx_atomic_wait
-#include <cstdint>
-#include <bits/functional_hash.h>
#include <bits/gthr.h>
#include <ext/numeric_traits.h>
-#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
-# include <cerrno>
-# include <climits>
-# include <unistd.h>
-# include <syscall.h>
-# include <bits/functexcept.h>
-#endif
-
#include <bits/stl_pair.h>
-#include <bits/std_mutex.h> // std::mutex, std::__condvar
namespace std _GLIBCXX_VISIBILITY(default)
{
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
= is_scalar_v<_Tp>
&& ((sizeof(_Tp) == sizeof(__detail::__platform_wait_t))
- && (alignof(_Tp*) >= __detail::__platform_wait_alignment));
+ && (alignof(_Tp) >= __detail::__platform_wait_alignment));
#else
= false;
#endif
namespace __detail
{
-#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
- enum class __futex_wait_flags : int
- {
-#ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
- __private_flag = 128,
-#else
- __private_flag = 0,
-#endif
- __wait = 0,
- __wake = 1,
- __wait_bitset = 9,
- __wake_bitset = 10,
- __wait_private = __wait | __private_flag,
- __wake_private = __wake | __private_flag,
- __wait_bitset_private = __wait_bitset | __private_flag,
- __wake_bitset_private = __wake_bitset | __private_flag,
- __bitset_match_any = -1
- };
-
- // If the futex *__addr is equal to __val, wait on the futex until woken.
- inline void
- __platform_wait(const int* __addr, int __val) noexcept
- {
- auto __e = syscall (SYS_futex, __addr,
- static_cast<int>(__futex_wait_flags::__wait_private),
- __val, nullptr);
- if (!__e || errno == EAGAIN)
- return;
- if (errno != EINTR)
- __throw_system_error(errno);
- }
-
- // Wake threads waiting on the futex *__addr.
- inline void
- __platform_notify(const int* __addr, bool __all) noexcept
- {
- syscall (SYS_futex, __addr,
- static_cast<int>(__futex_wait_flags::__wake_private),
- __all ? INT_MAX : 1);
- }
-#endif
-
inline void
__thread_yield() noexcept
{
#endif
}
- inline constexpr auto __atomic_spin_count_relax = 12;
- inline constexpr auto __atomic_spin_count = 16;
-
// return true if equal
template<typename _Tp>
inline bool
return __builtin_memcmp(&__a, &__b, sizeof(_Tp)) == 0;
}
- struct __wait_args_base;
-
- // The state used by atomic waiting and notifying functions.
- struct __waitable_state
- {
- // Don't use std::hardware_destructive_interference_size here because we
- // don't want the layout of library types to depend on compiler options.
- static constexpr auto _S_align = 64;
-
- // Count of threads blocked waiting on this state.
- alignas(_S_align) __platform_wait_t _M_waiters = 0;
-
-#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
- mutex _M_mtx;
-#endif
-
- // If we can't do a platform wait on the atomic variable itself,
- // we use this member as a proxy for the atomic variable and we
- // use this for waiting and notifying functions instead.
- alignas(_S_align) __platform_wait_t _M_ver = 0;
-
-#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
- __condvar _M_cv;
-#endif
-
- __waitable_state() = default;
-
- void
- _M_enter_wait() noexcept
- { __atomic_fetch_add(&_M_waiters, 1, __ATOMIC_SEQ_CST); }
-
- void
- _M_leave_wait() noexcept
- { __atomic_fetch_sub(&_M_waiters, 1, __ATOMIC_RELEASE); }
-
- bool
- _M_waiting() const noexcept
- {
- __platform_wait_t __res;
- __atomic_load(&_M_waiters, &__res, __ATOMIC_SEQ_CST);
- return __res != 0;
- }
-
- static __waitable_state&
- _S_state_for(const void* __addr) noexcept
- {
- constexpr __UINTPTR_TYPE__ __ct = 16;
- static __waitable_state __w[__ct];
- auto __key = ((__UINTPTR_TYPE__)__addr >> 2) % __ct;
- return __w[__key];
- }
-
- // Return an RAII type that calls _M_enter_wait() on construction
- // and _M_leave_wait() on destruction.
- static auto
- _S_track(__waitable_state*& __state, const __wait_args_base& __args,
- const void* __addr) noexcept;
- };
-
enum class __wait_flags : __UINT_LEAST32_TYPE__
{
__abi_version = 0,
__wait_flags _M_flags;
int _M_order = __ATOMIC_ACQUIRE;
__platform_wait_t _M_old = 0;
+ void* _M_wait_state = nullptr;
// Test whether _M_flags & __flags is non-zero.
bool
__wait_args(const __wait_args&) noexcept = default;
__wait_args& operator=(const __wait_args&) noexcept = default;
+ template<typename _ValFn,
+ typename _Tp = decay_t<decltype(std::declval<_ValFn&>()())>>
+ _Tp
+ _M_prep_for_wait_on(const void* __addr, _ValFn __vfn)
+ {
+ if constexpr (__platform_wait_uses_type<_Tp>)
+ {
+ _Tp __val = __vfn();
+ // If the wait is not proxied, set the value that we're waiting
+ // to change.
+ _M_old = __builtin_bit_cast(__platform_wait_t, __val);
+ return __val;
+ }
+ else
+ {
+ // Otherwise, it's a proxy wait and the proxy's _M_ver is used.
+ // This load must happen before the one done by __vfn().
+ _M_load_proxy_wait_val(__addr);
+ return __vfn();
+ }
+ }
+
private:
+ // Populates _M_wait_state and _M_old from the proxy for __addr.
+ void
+ _M_load_proxy_wait_val(const void* __addr);
+
template<typename _Tp>
static constexpr __wait_flags
_S_flags_for(const _Tp*, bool __bare_wait) noexcept
__res |= __proxy_wait;
return __res;
}
-
- // XXX what is this for? It's never used.
- template<typename _Tp>
- static int
- _S_memory_order_for(const _Tp*, int __order) noexcept
- {
- if constexpr (__platform_wait_uses_type<_Tp>)
- return __order;
- return __ATOMIC_ACQUIRE;
- }
};
- inline auto
- __waitable_state::_S_track(__waitable_state*& __state,
- const __wait_args_base& __args,
- const void* __addr) noexcept
- {
- struct _Tracker
- {
- _Tracker() noexcept : _M_st(nullptr) { }
-
- [[__gnu__::__nonnull__]]
- explicit
- _Tracker(__waitable_state* __st) noexcept
- : _M_st(__st)
- { __st->_M_enter_wait(); }
-
- _Tracker(const _Tracker&) = delete;
- _Tracker& operator=(const _Tracker&) = delete;
-
- ~_Tracker() { if (_M_st) _M_st->_M_leave_wait(); }
-
- __waitable_state* _M_st;
- };
-
- if (__args & __wait_flags::__track_contention)
- {
- // Caller does not externally track contention,
- // so we want to increment+decrement __state->_M_waiters
-
- // First make sure we have a waitable state for the address.
- if (!__state)
- __state = &__waitable_state::_S_state_for(__addr);
-
- // This object will increment the number of waiters and
- // decrement it again on destruction.
- return _Tracker{__state};
- }
- return _Tracker{}; // For bare waits caller tracks waiters.
- }
-
using __wait_result_type = pair<bool, __platform_wait_t>;
- inline __wait_result_type
- __spin_impl(const __platform_wait_t* __addr, const __wait_args_base& __args)
- {
- __platform_wait_t __val;
- for (auto __i = 0; __i < __atomic_spin_count; ++__i)
- {
- __atomic_load(__addr, &__val, __args._M_order);
- if (__val != __args._M_old)
- return { true, __val };
- if (__i < __atomic_spin_count_relax)
- __detail::__thread_relax();
- else
- __detail::__thread_yield();
- }
- return { false, __val };
- }
+ __wait_result_type
+ __wait_impl(const void* __addr, __wait_args_base&);
- inline __wait_result_type
- __wait_impl(const void* __addr, const __wait_args_base& __a)
- {
- __wait_args_base __args = __a;
- __waitable_state* __state = nullptr;
-
- const __platform_wait_t* __wait_addr;
- if (__args & __wait_flags::__proxy_wait)
- {
- __state = &__waitable_state::_S_state_for(__addr);
- __wait_addr = &__state->_M_ver;
- __atomic_load(__wait_addr, &__args._M_old, __args._M_order);
- }
- else
- __wait_addr = static_cast<const __platform_wait_t*>(__addr);
-
- if (__args & __wait_flags::__do_spin)
- {
- auto __res = __detail::__spin_impl(__wait_addr, __args);
- if (__res.first)
- return __res;
- if (__args & __wait_flags::__spin_only)
- return __res;
- }
-
- auto __tracker = __waitable_state::_S_track(__state, __args, __addr);
-
-#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
- __platform_wait(__wait_addr, __args._M_old);
- return { false, __args._M_old };
-#else
- __platform_wait_t __val;
- __atomic_load(__wait_addr, &__val, __args._M_order);
- if (__val == __args._M_old)
- {
- if (!__state)
- __state = &__waitable_state::_S_state_for(__addr);
- lock_guard<mutex> __l{ __state->_M_mtx };
- __atomic_load(__wait_addr, &__val, __args._M_order);
- if (__val == __args._M_old)
- __state->_M_cv.wait(__state->_M_mtx);
- }
- return { false, __val };
-#endif
- }
-
- inline void
- __notify_impl(const void* __addr, [[maybe_unused]] bool __all,
- const __wait_args_base& __args)
- {
- __waitable_state* __state = nullptr;
-
- const __platform_wait_t* __wait_addr;
- if (__args & __wait_flags::__proxy_wait)
- {
- __state = &__waitable_state::_S_state_for(__addr);
- // Waiting for *__addr is actually done on the proxy's _M_ver.
- __wait_addr = &__state->_M_ver;
- __atomic_fetch_add(&__state->_M_ver, 1, __ATOMIC_RELAXED);
- // Because the proxy might be shared by several waiters waiting
- // on different atomic variables, we need to wake them all so
- // they can re-evaluate their conditions to see if they should
- // stop waiting or should wait again.
- __all = true;
- }
- else // Use the atomic variable's own address.
- __wait_addr = static_cast<const __platform_wait_t*>(__addr);
-
- if (__args & __wait_flags::__track_contention)
- {
- if (!__state)
- __state = &__waitable_state::_S_state_for(__addr);
- if (!__state->_M_waiting())
- return;
- }
-
-#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
- __platform_notify(__wait_addr, __all);
-#else
- if (!__state)
- __state = &__waitable_state::_S_state_for(__addr);
- lock_guard<mutex> __l{ __state->_M_mtx };
- __state->_M_cv.notify_all();
-#endif
- }
+ void
+ __notify_impl(const void* __addr, bool __all, const __wait_args_base&);
} // namespace __detail
// Wait on __addr while __pred(__vfn()) is false.
bool __bare_wait = false) noexcept
{
__detail::__wait_args __args{ __addr, __bare_wait };
- _Tp __val = __vfn();
+ _Tp __val = __args._M_prep_for_wait_on(__addr, __vfn);
while (!__pred(__val))
{
- // If the wait is not proxied, set the value that we're waiting
- // to change.
- if constexpr (__platform_wait_uses_type<_Tp>)
- __args._M_old = __builtin_bit_cast(__detail::__platform_wait_t,
- __val);
- // Otherwise, it's a proxy wait and the proxy's _M_ver is used.
-
__detail::__wait_impl(__addr, __args);
- __val = __vfn();
+ __val = __args._M_prep_for_wait_on(__addr, __vfn);
}
// C++26 will return __val
}
{
auto __pfn = [&](const _Tp& __val)
{ return !__detail::__atomic_eq(__old, __val); };
- __atomic_wait_address(__addr, __pfn, forward<_ValFn>(__vfn));
+ std::__atomic_wait_address(__addr, __pfn, forward<_ValFn>(__vfn));
}
template<typename _Tp>
__detail::__wait_args __args{ __addr, __bare_wait };
__detail::__notify_impl(__addr, __all, __args);
}
+
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif // __glibcxx_atomic_wait
inst_sources =
endif
-sources = tzdb.cc format.cc
+sources = tzdb.cc format.cc atomic.cc
vpath % $(top_srcdir)/src/c++20
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libc__20convenience_la_LIBADD =
-am__objects_1 = tzdb.lo format.lo
+am__objects_1 = tzdb.lo format.lo atomic.lo
@ENABLE_EXTERN_TEMPLATE_TRUE@am__objects_2 = sstream-inst.lo
@GLIBCXX_HOSTED_TRUE@am_libc__20convenience_la_OBJECTS = \
@GLIBCXX_HOSTED_TRUE@ $(am__objects_1) $(am__objects_2)
@ENABLE_EXTERN_TEMPLATE_TRUE@inst_sources = \
@ENABLE_EXTERN_TEMPLATE_TRUE@ sstream-inst.cc
-sources = tzdb.cc format.cc
+sources = tzdb.cc format.cc atomic.cc
@GLIBCXX_HOSTED_FALSE@libc__20convenience_la_SOURCES =
@GLIBCXX_HOSTED_TRUE@libc__20convenience_la_SOURCES = $(sources) $(inst_sources)
--- /dev/null
+++ b/libstdc++-v3/src/c++20/atomic.cc
+// Definitions for <atomic> wait/notify -*- C++ -*-
+
+// Copyright (C) 2020-2025 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+#include <bits/version.h>
+
+#if __glibcxx_atomic_wait
+#include <atomic>
+#include <bits/atomic_timed_wait.h>
+#include <bits/functional_hash.h>
+#include <cstdint>
+#include <bits/std_mutex.h> // std::mutex, std::__condvar
+
+#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
+# include <cerrno>
+# include <climits>
+# include <unistd.h>
+# include <syscall.h>
+# include <bits/functexcept.h>
+# include <sys/time.h>
+#endif
+
+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
+# ifndef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT
+// __waitable_state assumes that we consistently use the same implementation
+// (i.e. futex vs mutex+condvar) for timed and untimed waiting.
+# error "This configuration is not currently supported"
+# endif
+#endif
+
+namespace std
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+namespace __detail
+{
+namespace
+{
+#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
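+  // Operation codes and flags for the futex syscall; see the futex(2) man page.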
+ enum class __futex_wait_flags : int
+ {
+#ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
+ __private_flag = 128,
+#else
+ __private_flag = 0,
+#endif
+ __wait = 0,
+ __wake = 1,
+ __wait_bitset = 9,
+ __wake_bitset = 10,
+ __wait_private = __wait | __private_flag,
+ __wake_private = __wake | __private_flag,
+ __wait_bitset_private = __wait_bitset | __private_flag,
+ __wake_bitset_private = __wake_bitset | __private_flag,
+ __bitset_match_any = -1
+ };
+
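+  // If the futex *__addr is equal to __val, wait on the futex until woken.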
+ void
+ __platform_wait(const int* __addr, int __val) noexcept
+ {
+ auto __e = syscall (SYS_futex, __addr,
+ static_cast<int>(__futex_wait_flags::__wait_private),
+ __val, nullptr);
+ if (!__e || errno == EAGAIN)
+ return;
+ if (errno != EINTR)
+ __throw_system_error(errno);
+ }
+
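+  // Wake threads waiting on the futex *__addr.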
+ void
+ __platform_notify(const int* __addr, bool __all) noexcept
+ {
+ syscall (SYS_futex, __addr,
+ static_cast<int>(__futex_wait_flags::__wake_private),
+ __all ? INT_MAX : 1);
+ }
+#endif
+
+ // The state used by atomic waiting and notifying functions.
+ struct __waitable_state
+ {
+ // Don't use std::hardware_destructive_interference_size here because we
+ // don't want the layout of library types to depend on compiler options.
+ static constexpr auto _S_align = 64;
+
+ // Count of threads blocked waiting on this state.
+ alignas(_S_align) __platform_wait_t _M_waiters = 0;
+
+#ifndef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT
+ mutex _M_mtx;
+
+ // This type meets the Cpp17BasicLockable requirements.
+ void lock() { _M_mtx.lock(); }
+ void unlock() { _M_mtx.unlock(); }
+#else
+ void lock() { }
+ void unlock() { }
+#endif
+
+ // If we can't do a platform wait on the atomic variable itself,
+ // we use this member as a proxy for the atomic variable and we
+ // use this for waiting and notifying functions instead.
+ alignas(_S_align) __platform_wait_t _M_ver = 0;
+
+#ifndef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT
+ __condvar _M_cv;
+#endif
+
+ __waitable_state() = default;
+
+ void
+ _M_enter_wait() noexcept
+ { __atomic_fetch_add(&_M_waiters, 1, __ATOMIC_SEQ_CST); }
+
+ void
+ _M_leave_wait() noexcept
+ { __atomic_fetch_sub(&_M_waiters, 1, __ATOMIC_RELEASE); }
+
+ bool
+ _M_waiting() const noexcept
+ {
+ __platform_wait_t __res;
+ __atomic_load(&_M_waiters, &__res, __ATOMIC_SEQ_CST);
+ return __res != 0;
+ }
+
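+    // Return the state to use for the atomic object at __addr, chosen from
+    // a small fixed-size table by hashing the address (so unrelated objects
+    // may share a state).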
+ static __waitable_state&
+ _S_state_for(const void* __addr) noexcept
+ {
+ constexpr __UINTPTR_TYPE__ __ct = 16;
+ static __waitable_state __w[__ct];
+ auto __key = ((__UINTPTR_TYPE__)__addr >> 2) % __ct;
+ return __w[__key];
+ }
+ };
+
+ // Scope-based contention tracking.
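+  // Calls _M_enter_wait() on construction and _M_leave_wait() on
+  // destruction when __track_contention is set in the flags.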
+ struct scoped_wait
+ {
+    // pre: if __track_contention is set in args, then args._M_wait_state != nullptr
+ explicit
+ scoped_wait(const __wait_args_base& args) : _M_state(nullptr)
+ {
+ if (args & __wait_flags::__track_contention)
+ {
+ _M_state = static_cast<__waitable_state*>(args._M_wait_state);
+ _M_state->_M_enter_wait();
+ }
+ }
+
+ ~scoped_wait()
+ {
+ if (_M_state)
+ _M_state->_M_leave_wait();
+ }
+
+ scoped_wait(scoped_wait&&) = delete;
+
+ __waitable_state* _M_state;
+ };
+
+ // Scoped lock type
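+  // Locks the state's mutex for the duration of the scope (a no-op when
+  // platform waits are available) and also tracks contention.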
+ struct waiter_lock
+ {
+    // pre: args._M_wait_state != nullptr
+ explicit
+ waiter_lock(const __wait_args_base& args)
+ : _M_state(*static_cast<__waitable_state*>(args._M_wait_state)),
+ _M_track_contention(args & __wait_flags::__track_contention)
+ {
+ _M_state.lock();
+ if (_M_track_contention)
+ _M_state._M_enter_wait();
+ }
+
+ waiter_lock(waiter_lock&&) = delete;
+
+ ~waiter_lock()
+ {
+ if (_M_track_contention)
+ _M_state._M_leave_wait();
+ _M_state.unlock();
+ }
+
+ __waitable_state& _M_state;
+ bool _M_track_contention;
+ };
+
+ constexpr auto __atomic_spin_count_relax = 12;
+ constexpr auto __atomic_spin_count = 16;
+
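+  // Spin for a short while, checking whether *__addr has changed from
+  // __args._M_old; relax for the first iterations, then yield.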
+ __wait_result_type
+ __spin_impl(const __platform_wait_t* __addr, const __wait_args_base& __args)
+ {
+ __platform_wait_t __val;
+ for (auto __i = 0; __i < __atomic_spin_count; ++__i)
+ {
+ __atomic_load(__addr, &__val, __args._M_order);
+ if (__val != __args._M_old)
+ return { true, __val };
+ if (__i < __atomic_spin_count_relax)
+ __thread_relax();
+ else
+ __thread_yield();
+ }
+ return { false, __val };
+ }
+
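+  // Ensure args._M_wait_state points to the waitable state for addr.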
+ inline __waitable_state*
+ set_wait_state(const void* addr, __wait_args_base& args)
+ {
+ if (args._M_wait_state == nullptr)
+ args._M_wait_state = &__waitable_state::_S_state_for(addr);
+ return static_cast<__waitable_state*>(args._M_wait_state);
+ }
+
+} // namespace
+
+// Called for a proxy wait
+void
+__wait_args::_M_load_proxy_wait_val(const void* addr)
+{
+ // __glibcxx_assert( *this & __wait_flags::__proxy_wait );
+
+ // We always need a waitable state for proxy waits.
+ auto state = set_wait_state(addr, *this);
+
+ // Read the value of the _M_ver counter.
+ __atomic_load(&state->_M_ver, &_M_old, __ATOMIC_ACQUIRE);
+}
+
+__wait_result_type
+__wait_impl(const void* __addr, __wait_args_base& __args)
+{
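+  // For proxy waits, _M_load_proxy_wait_val has already set _M_wait_state.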
+ auto __state = static_cast<__waitable_state*>(__args._M_wait_state);
+
+ const __platform_wait_t* __wait_addr;
+
+ if (__args & __wait_flags::__proxy_wait)
+ __wait_addr = &__state->_M_ver;
+ else
+ __wait_addr = static_cast<const __platform_wait_t*>(__addr);
+
+ if (__args & __wait_flags::__do_spin)
+ {
+ auto __res = __detail::__spin_impl(__wait_addr, __args);
+ if (__res.first)
+ return __res;
+ if (__args & __wait_flags::__spin_only)
+ return __res;
+ }
+
+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
+ if (__args & __wait_flags::__track_contention)
+ set_wait_state(__addr, __args);
+ scoped_wait s(__args);
+ __platform_wait(__wait_addr, __args._M_old);
+ return { false, __args._M_old };
+#else
+ waiter_lock l(__args);
+ __platform_wait_t __val;
+ __atomic_load(__wait_addr, &__val, __args._M_order);
+ if (__val == __args._M_old)
+ __state->_M_cv.wait(__state->_M_mtx);
+ return { false, __val };
+#endif
+}
+
+void
+__notify_impl(const void* __addr, [[maybe_unused]] bool __all,
+ const __wait_args_base& __args)
+{
+ auto __state = static_cast<__waitable_state*>(__args._M_wait_state);
+ if (!__state)
+ __state = &__waitable_state::_S_state_for(__addr);
+
+ [[maybe_unused]] const __platform_wait_t* __wait_addr;
+
+  // Lock the mutex, so that a proxied waiter cannot load the old _M_ver
+  // value and then go to sleep after we increment it and call notify_all(),
+  // missing the notification.
+ lock_guard __l{ *__state };
+
+ if (__args & __wait_flags::__proxy_wait)
+ {
+ // Waiting for *__addr is actually done on the proxy's _M_ver.
+ __wait_addr = &__state->_M_ver;
+
+ // Increment _M_ver so that waiting threads see something changed.
+ // This has to be atomic because the load in _M_load_proxy_wait_val
+ // is done without the mutex locked.
+ __atomic_fetch_add(&__state->_M_ver, 1, __ATOMIC_RELEASE);
+
+ // Because the proxy might be shared by several waiters waiting
+ // on different atomic variables, we need to wake them all so
+ // they can re-evaluate their conditions to see if they should
+ // stop waiting or should wait again.
+ __all = true;
+ }
+ else // Use the atomic variable's own address.
+ __wait_addr = static_cast<const __platform_wait_t*>(__addr);
+
+ if (__args & __wait_flags::__track_contention)
+ {
+ if (!__state->_M_waiting())
+ return;
+ }
+
+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
+ __platform_notify(__wait_addr, __all);
+#else
+ __state->_M_cv.notify_all();
+#endif
+}
+
+// Timed atomic waiting functions
+
+namespace
+{
+#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
+// Returns true if wait ended before timeout.
+bool
+__platform_wait_until(const __platform_wait_t* __addr,
+ __platform_wait_t __old,
+ const __wait_clock_t::time_point& __atime) noexcept
+{
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ struct timespec __rt =
+ {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ if (syscall (SYS_futex, __addr,
+ static_cast<int>(__futex_wait_flags::__wait_bitset_private),
+ __old, &__rt, nullptr,
+ static_cast<int>(__futex_wait_flags::__bitset_match_any)))
+ {
+ if (errno == ETIMEDOUT)
+ return false;
+ if (errno != EINTR && errno != EAGAIN)
+ __throw_system_error(errno);
+ }
+ return true;
+}
+#endif // HAVE_LINUX_FUTEX
+
+#ifndef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT
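+// Returns true if wait ended before timeout.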
+bool
+__cond_wait_until(__condvar& __cv, mutex& __mx,
+ const __wait_clock_t::time_point& __atime)
+{
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts =
+ {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+#ifdef _GLIBCXX_USE_PTHREAD_COND_CLOCKWAIT
+ if constexpr (is_same_v<chrono::steady_clock, __wait_clock_t>)
+ __cv.wait_until(__mx, CLOCK_MONOTONIC, __ts);
+ else
+#endif
+ __cv.wait_until(__mx, __ts);
+ return __wait_clock_t::now() < __atime;
+}
+#endif // ! HAVE_PLATFORM_TIMED_WAIT
+
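+// Spin until the deadline, backing off from busy-waiting to yielding and
+// then to sleeping as more time elapses.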
+__wait_result_type
+__spin_until_impl(const __platform_wait_t* __addr,
+ const __wait_args_base& __args,
+ const __wait_clock_t::time_point& __deadline)
+{
+ auto __t0 = __wait_clock_t::now();
+ using namespace literals::chrono_literals;
+
+ __platform_wait_t __val{};
+ auto __now = __wait_clock_t::now();
+ for (; __now < __deadline; __now = __wait_clock_t::now())
+ {
+ auto __elapsed = __now - __t0;
+#ifndef _GLIBCXX_NO_SLEEP
+ if (__elapsed > 128ms)
+ this_thread::sleep_for(64ms);
+ else if (__elapsed > 64us)
+ this_thread::sleep_for(__elapsed / 2);
+ else
+#endif
+ if (__elapsed > 4us)
+ __thread_yield();
+ else if (auto __res = __detail::__spin_impl(__addr, __args); __res.first)
+ return __res;
+
+ __atomic_load(__addr, &__val, __args._M_order);
+ if (__val != __args._M_old)
+ return { true, __val };
+ }
+ return { false, __val };
+}
+} // namespace
+
+__wait_result_type
+__wait_until_impl(const void* __addr, __wait_args_base& __args,
+ const __wait_clock_t::duration& __time)
+{
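+  // Callers pass the absolute timeout as a duration since the clock's epoch.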
+ const __wait_clock_t::time_point __atime(__time);
+ auto __state = static_cast<__waitable_state*>(__args._M_wait_state);
+ const __platform_wait_t* __wait_addr;
+ if (__args & __wait_flags::__proxy_wait)
+ __wait_addr = &__state->_M_ver;
+ else
+ __wait_addr = static_cast<const __platform_wait_t*>(__addr);
+
+ if (__args & __wait_flags::__do_spin)
+ {
+ auto __res = __detail::__spin_until_impl(__wait_addr, __args, __atime);
+ if (__res.first)
+ return __res;
+ if (__args & __wait_flags::__spin_only)
+ return __res;
+ }
+
+#ifdef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT
+ if (__args & __wait_flags::__track_contention)
+ set_wait_state(__addr, __args);
+ scoped_wait s(__args);
+ if (__platform_wait_until(__wait_addr, __args._M_old, __atime))
+ return { true, __args._M_old };
+ else
+ return { false, __args._M_old };
+#else
+ waiter_lock l(__args);
+ __platform_wait_t __val;
+ __atomic_load(__wait_addr, &__val, __args._M_order);
+ if (__val == __args._M_old
+ && __cond_wait_until(__state->_M_cv, __state->_M_mtx, __atime))
+ return { true, __val };
+ return { false, __val };
+#endif
+}
+
+} // namespace __detail
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace std
+#endif
#endif
int truncate = 0;
-
-// { dg-xfail-if "PR libstdc++/99995" { c++20 } }
atom->store(0);
}
+#if 0
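+// __waitable_state is no longer defined in the headers, so the test cannot
+// check that these two objects share a state.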
auto a = &std::__detail::__waitable_state::_S_state_for((void*)(atomics.a[0]));
auto b = &std::__detail::__waitable_state::_S_state_for((void*)(atomics.a[1]));
VERIFY( a == b );
+#endif
auto fut0 = std::async(std::launch::async, [&] { atomics.a[0]->wait(0); });
auto fut1 = std::async(std::launch::async, [&] { atomics.a[1]->wait(0); });
known_versions.push_back("GLIBCXX_3.4.32");
known_versions.push_back("GLIBCXX_3.4.33");
known_versions.push_back("GLIBCXX_3.4.34");
+ known_versions.push_back("GLIBCXX_3.4.35");
known_versions.push_back("GLIBCXX_LDBL_3.4.31");
known_versions.push_back("GLIBCXX_IEEE128_3.4.29");
known_versions.push_back("GLIBCXX_IEEE128_3.4.30");