__atomic_eq(const _Tp& __a, const _Tp& __b)
{
// TODO make this do the correct padding bit ignoring comparison
- return __builtin_memcmp(&__a, &__b, sizeof(_Tp)) == 0;
+ return __builtin_memcmp(std::addressof(__a), std::addressof(__b),
+                         sizeof(_Tp)) == 0;
}
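
Worth noting why std::addressof is preferred over a plain `&` here: for a _Tp that overloads (or deletes) unary operator&, taking the address directly would be ill-formed or would call the overload instead of yielding the object's real address. A minimal sketch of the difference, using a hypothetical type Evil that is not part of this patch:

#include <cstring>   // std::memcmp
#include <memory>    // std::addressof

// Hypothetical type with a deleted unary operator& (illustration only).
struct Evil
{
  int value{};
  void operator&() const = delete;   // makes a plain `&obj` ill-formed
};

// Same bitwise comparison as above: `&a` / `&b` would not compile here,
// while std::addressof obtains the real address without invoking operator&.
inline bool bitwise_equal(const Evil& a, const Evil& b)
{
  return std::memcmp(std::addressof(a), std::addressof(b), sizeof(Evil)) == 0;
}
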
// Storage for up to 64 bits of value, should be considered opaque bits.
void
wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
{
- std::__atomic_wait_address_v(&_M_i, __old,
-       [__m, this] { return this->load(__m); });
+ std::__atomic_wait_address_v(std::addressof(_M_i), __old,
+       [__m, this] { return this->load(__m); });
}
// TODO add const volatile overload
void
notify_one() noexcept
- { std::__atomic_notify_address(&_M_i, false); }
+ { std::__atomic_notify_address(std::addressof(_M_i), false); }
void
notify_all() noexcept
- { std::__atomic_notify_address(&_M_i, true); }
+ { std::__atomic_notify_address(std::addressof(_M_i), true); }
#endif // __cpp_lib_atomic_wait
-
};
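
For reference, a minimal usage sketch of the public C++20 wait/notify interface these members back (std::atomic<T>::wait and notify_one). The flag variable and thread setup are illustrative only, and the snippet assumes __cpp_lib_atomic_wait is available:

#include <atomic>
#include <thread>

int main()
{
  std::atomic<int> flag{0};

  std::thread waiter([&flag] {
    // Blocks until the stored value is observed to differ from 0;
    // returns immediately if it already differs.
    flag.wait(0);
  });

  flag.store(1);
  flag.notify_one();   // wakes the thread blocked in flag.wait(0)
  waiter.join();
}

notify_all() behaves the same way except that it wakes every thread blocked on the same atomic object rather than at most one.
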
/// Partial specialization for pointer types.