__xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
{ return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
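+ // True if the compiler accepts __atomic_fetch_add for _Tp, i.e. it can
+ // perform the atomic read-modify-write directly for this type.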
+ template<typename _Tp>
+ concept __atomic_fetch_addable
+ = requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };
+
template<typename _Tp>
_Tp
__fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
{
- _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
- _Val<_Tp> __newval = __oldval + __i;
- while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
- memory_order_relaxed))
- __newval = __oldval + __i;
- return __oldval;
+ if constexpr (__atomic_fetch_addable<_Tp>)
+ return __atomic_fetch_add(__ptr, __i, int(__m));
+ else
+ {
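+ // No native fetch-add for _Tp: emulate it with a CAS loop.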
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval + __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+ memory_order_relaxed))
+ __newval = __oldval + __i;
+ return __oldval;
+ }
}
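+ // Likewise, true if __atomic_fetch_sub is supported for _Tp.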
+ template<typename _Tp>
+ concept __atomic_fetch_subtractable
+ = requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };
+
template<typename _Tp>
_Tp
__fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
{
- _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
- _Val<_Tp> __newval = __oldval - __i;
- while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
- memory_order_relaxed))
- __newval = __oldval - __i;
- return __oldval;
+ if constexpr (__atomic_fetch_subtractable<_Tp>)
+ return __atomic_fetch_sub(__ptr, __i, int(__m));
+ else
+ {
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval - __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+ memory_order_relaxed))
+ __newval = __oldval - __i;
+ return __oldval;
+ }
}
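+ // Same check for __atomic_add_fetch, which returns the updated value.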
+ template<typename _Tp>
+ concept __atomic_add_fetchable
+ = requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };
+
template<typename _Tp>
_Tp
__add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
{
- _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
- _Val<_Tp> __newval = __oldval + __i;
- while (!compare_exchange_weak(__ptr, __oldval, __newval,
- memory_order_seq_cst,
- memory_order_relaxed))
- __newval = __oldval + __i;
- return __newval;
+ if constexpr (__atomic_add_fetchable<_Tp>)
+ return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
+ else
+ {
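+ // CAS loop as above, but seq_cst and returning the new value.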
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval + __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval,
+ memory_order_seq_cst,
+ memory_order_relaxed))
+ __newval = __oldval + __i;
+ return __newval;
+ }
}
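+ // Same check for __atomic_sub_fetch.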
+ template<typename _Tp>
+ concept __atomic_sub_fetchable
+ = requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };
+
template<typename _Tp>
_Tp
__sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
{
- _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
- _Val<_Tp> __newval = __oldval - __i;
- while (!compare_exchange_weak(__ptr, __oldval, __newval,
- memory_order_seq_cst,
- memory_order_relaxed))
- __newval = __oldval - __i;
- return __newval;
+ if constexpr (__atomic_sub_fetchable<_Tp>)
+ return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
+ else
+ {
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval - __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval,
+ memory_order_seq_cst,
+ memory_order_relaxed))
+ __newval = __oldval - __i;
+ return __newval;
+ }
}
} // namespace __atomic_impl