template<typename _Tp>
class _Sp_atomic
{
using value_type = _Tp;
+ using element_type = typename _Tp::element_type;
friend struct atomic<_Tp>;
~_Atomic_count()
{
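+ // The destructor cannot run concurrently with any other access to
+ // *this, so a relaxed load is sufficient here.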
- auto __val = _M_val.load(memory_order_relaxed);
+ auto __val = _AtomicRef(_M_val).load(memory_order_relaxed);
_GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val);
__glibcxx_assert(!(__val & _S_lock_bit));
if (auto __pi = reinterpret_cast<pointer>(__val))
{
// To acquire the lock we flip the LSB from 0 to 1.
- auto __current = _M_val.load(memory_order_relaxed);
+ _AtomicRef __aref(_M_val);
+ auto __current = __aref.load(memory_order_relaxed);
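+ // Spin until the lock bit is clear before attempting the
+ // compare-exchange below.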
while (__current & _S_lock_bit)
{
#if __glibcxx_atomic_wait
__detail::__thread_relax();
#endif
- __current = _M_val.load(memory_order_relaxed);
+ __current = __aref.load(memory_order_relaxed);
}
_GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);
- while (!_M_val.compare_exchange_strong(__current,
+ while (!__aref.compare_exchange_strong(__current,
__current | _S_lock_bit,
__o,
memory_order_relaxed))
// Precondition: caller holds lock!
void
unlock(memory_order __o) const noexcept
{
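+ // The caller holds the lock, so the LSB of _M_val is set;
+ // subtracting 1 clears it, releasing the lock.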
_GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
- _M_val.fetch_sub(1, __o);
+ _AtomicRef(_M_val).fetch_sub(1, __o);
_GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
}
void
_M_swap_unlock(__count_type& __c, memory_order __o) noexcept
{
if (__o != memory_order_seq_cst)
__o = memory_order_release;
auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
_GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
- __x = _M_val.exchange(__x, __o);
+ __x = _AtomicRef(_M_val).exchange(__x, __o);
_GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
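+ // The exchanged-out value still has the lock bit set; mask it off
+ // to recover the old control block pointer.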
__c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
}
#if __glibcxx_atomic_wait
// Precondition: caller holds lock!
void
- _M_wait_unlock(memory_order __o) const noexcept
+ _M_wait_unlock(const element_type* const& __ptr, memory_order __o) const noexcept
{
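+ // __ptr is bound to the owning _Sp_atomic's _M_ptr, so reading it
+ // through the reference again later observes any store made while
+ // we were waiting.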
+ auto __old_ptr = __ptr;
_GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
- auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
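+ // fetch_sub returns the value before the decrement, which has the
+ // lock bit set; subtracting 1 yields the unlocked pi value.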
+ uintptr_t __old_pi
+ = _AtomicRef(_M_val).fetch_sub(1, memory_order_relaxed) - 1u;
_GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
- _M_val.wait(__v & ~_S_lock_bit, __o);
+
+ // Ensure that the correct value of _M_ptr is visible after locking,
+ // by upgrading relaxed or consume to acquire.
+ auto __lo = __o;
+ if (__o != memory_order_seq_cst)
+ __lo = memory_order_acquire;
+
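+ // __atomic_wait_address blocks until the predicate (first lambda)
+ // returns true for a value obtained from the value function
+ // (second lambda).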
+ std::__atomic_wait_address(
+ &_M_val,
+ [=, &__ptr, this](uintptr_t __new_pi)
+ {
+ if (__old_pi != (__new_pi & ~_S_lock_bit))
+ // The control block changed, so we can wake up.
+ return true;
+
+ // The control block is the same, so check whether the stored
+ // pointer changed. The lock must be taken first, and the value
+ // of pi may also have been updated in the meantime, so reload it.
+ __new_pi = reinterpret_cast<uintptr_t>(this->lock(__lo));
+ auto __new_ptr = __ptr;
+ this->unlock(memory_order_relaxed);
+ // Wake up if either of the values changed.
+ return __new_pi != __old_pi || __new_ptr != __old_ptr;
+ },
+ [__o, this] { return _AtomicRef(_M_val).load(__o); });
}
void
notify_one() noexcept
{
_GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
- _M_val.notify_one();
+ _AtomicRef(_M_val).notify_one();
_GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
}
void
notify_all() noexcept
{
_GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
- _M_val.notify_all();
+ _AtomicRef(_M_val).notify_all();
_GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
}
#endif
private:
- mutable __atomic_base<uintptr_t> _M_val{0};
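+ // Store _M_val as a plain uintptr_t and access it through
+ // __atomic_ref, so that its address can be passed directly to
+ // std::__atomic_wait_address above.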
+ using _AtomicRef = __atomic_ref<uintptr_t>;
+ alignas(_AtomicRef::required_alignment) mutable uintptr_t _M_val{0};
static constexpr uintptr_t _S_lock_bit{1};
};
- typename _Tp::element_type* _M_ptr = nullptr;
+ element_type* _M_ptr = nullptr;
_Atomic_count _M_refcount;
void
wait(value_type __old, memory_order __o) const noexcept
{
auto __pi = _M_refcount.lock(memory_order_acquire);
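+ // Block only if both the stored pointer and the control block
+ // still match the expected value.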
if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
- _M_refcount._M_wait_unlock(__o);
+ _M_refcount._M_wait_unlock(_M_ptr, __o);
else
_M_refcount.unlock(memory_order_relaxed);
}