// -*- C++ -*- header.
// Copyright (C) 2015-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
#include <bits/c++config.h>
#include <atomic>
#include <chrono>
#if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
#include <mutex>
#include <condition_variable>
#endif
#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#ifdef _GLIBCXX_HAS_GTHREADS
#if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
struct __atomic_futex_unsigned_base
{
// Returns false iff a timeout occurred.
};
template <unsigned _Waiter_bit = 0x80000000>
  class __atomic_futex_unsigned : __atomic_futex_unsigned_base
{
typedef chrono::system_clock __clock_t;
    // This must be lock-free and at offset 0.
atomic<unsigned> _M_data;
  public:
    explicit
__atomic_futex_unsigned(unsigned __data) : _M_data(__data)
{ }
}
private:
// If a timeout occurs, returns a current value after the timeout;
// otherwise, returns the operand's value if equal is true or a different
// value if equal is false.
{
for (;;)
{
	  // Don't bother checking the value again because we expect the caller
	  // to have done it recently.
// memory_order_relaxed is sufficient because we can rely on just the
// modification order (store_notify uses an atomic RMW operation too),
// and the futex syscalls synchronize between themselves.
_M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
	  bool __ret = _M_futex_wait_until((unsigned*)(void*)&_M_data,
					   __assumed | _Waiter_bit,
					   __has_timeout, __s, __ns);
// Fetch the current value after waiting (clears _Waiter_bit).
__assumed = _M_load(__mo);
if (!__ret || ((__operand == __assumed) == __equal))
bool __equal, memory_order __mo)
{
return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
- false, chrono::seconds(0), chrono::nanoseconds(0));
+ false, {}, {});
}
// If a timeout occurs, returns a current value after the timeout;
_M_load_when_not_equal(unsigned __val, memory_order __mo)
{
unsigned __i = _M_load(__mo);
- if ((__i & ~_Waiter_bit) != __val) return;
+ if ((__i & ~_Waiter_bit) != __val)
+ return (__i & ~_Waiter_bit);
// TODO Spin-wait first.
return _M_load_and_test(__i, __val, false, __mo);
}
// Returns false iff a timeout occurred.
template<typename _Rep, typename _Period>
- _GLIBCXX_ALWAYS_INLINE bool
- _M_load_when_equal_for(unsigned __val, memory_order __mo,
- const chrono::duration<_Rep, _Period>& __rtime)
- {
- return _M_load_when_equal_until(__val, __mo, __clock_t::now() + __rtime);
- }
+ _GLIBCXX_ALWAYS_INLINE bool
+ _M_load_when_equal_for(unsigned __val, memory_order __mo,
+ const chrono::duration<_Rep, _Period>& __rtime)
+ {
+ return _M_load_when_equal_until(__val, __mo,
+ __clock_t::now() + __rtime);
+ }
// Returns false iff a timeout occurred.
template<typename _Clock, typename _Duration>
- _GLIBCXX_ALWAYS_INLINE bool
- _M_load_when_equal_until(unsigned __val, memory_order __mo,
- const chrono::time_point<_Clock, _Duration>& __atime)
- {
- // DR 887 - Sync unknown clock to known clock.
- const typename _Clock::time_point __c_entry = _Clock::now();
- const __clock_t::time_point __s_entry = __clock_t::now();
- const auto __delta = __atime - __c_entry;
- const auto __s_atime = __s_entry + __delta;
- return _M_load_when_equal_until(__val, __mo, __s_atime);
- }
+ _GLIBCXX_ALWAYS_INLINE bool
+ _M_load_when_equal_until(unsigned __val, memory_order __mo,
+ const chrono::time_point<_Clock, _Duration>& __atime)
+ {
+ // DR 887 - Sync unknown clock to known clock.
+ const typename _Clock::time_point __c_entry = _Clock::now();
+ const __clock_t::time_point __s_entry = __clock_t::now();
+ const auto __delta = __atime - __c_entry;
+ const auto __s_atime = __s_entry + __delta;
+ return _M_load_when_equal_until(__val, __mo, __s_atime);
+ }
// Returns false iff a timeout occurred.
template<typename _Duration>
if (_M_data.exchange(__val, __mo) & _Waiter_bit)
_M_futex_notify_all(__futex);
}
};
#else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)
// If futexes are not available, use a mutex and a condvar to wait.
// Because we access the data only within critical sections, all accesses
// are sequentially consistent; thus, we satisfy any provided memory_order.
template <unsigned _Waiter_bit = 0x80000000>
  class __atomic_futex_unsigned
{
typedef chrono::system_clock __clock_t;
mutex _M_mutex;
condition_variable _M_condvar;
  public:
    explicit
__atomic_futex_unsigned(unsigned __data) : _M_data(__data)
{ }
}
template<typename _Rep, typename _Period>
- _GLIBCXX_ALWAYS_INLINE bool
- _M_load_when_equal_for(unsigned __val, memory_order __mo,
- const chrono::duration<_Rep, _Period>& __rtime)
- {
- unique_lock<mutex> __lock(_M_mutex);
- return _M_condvar.wait_for(__lock, __rtime,
- [&] { return _M_data == __val;});
- }
+ _GLIBCXX_ALWAYS_INLINE bool
+ _M_load_when_equal_for(unsigned __val, memory_order __mo,
+ const chrono::duration<_Rep, _Period>& __rtime)
+ {
+ unique_lock<mutex> __lock(_M_mutex);
+ return _M_condvar.wait_for(__lock, __rtime,
+ [&] { return _M_data == __val;});
+ }
template<typename _Clock, typename _Duration>
- _GLIBCXX_ALWAYS_INLINE bool
- _M_load_when_equal_until(unsigned __val, memory_order __mo,
- const chrono::time_point<_Clock, _Duration>& __atime)
- {
- unique_lock<mutex> __lock(_M_mutex);
- return _M_condvar.wait_until(__lock, __atime,
- [&] { return _M_data == __val;});
- }
+ _GLIBCXX_ALWAYS_INLINE bool
+ _M_load_when_equal_until(unsigned __val, memory_order __mo,
+ const chrono::time_point<_Clock, _Duration>& __atime)
+ {
+ unique_lock<mutex> __lock(_M_mutex);
+ return _M_condvar.wait_until(__lock, __atime,
+ [&] { return _M_data == __val;});
+ }
_GLIBCXX_ALWAYS_INLINE void
_M_store_notify_all(unsigned __val, memory_order __mo)
_M_data = __val;
_M_condvar.notify_all();
}
};
#endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
#endif // _GLIBCXX_HAS_GTHREADS
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std