// NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
// 8 bytes, since that is what GCC built-in functions for atomic
// memory access expect.
+
+ namespace __atomic_impl
+ {
+ // Remove volatile and create a non-deduced context for value arguments.
+ template<typename _Tp>
+ using _Val = typename remove_volatile<_Tp>::type;
+
+#if __glibcxx_atomic_min_max
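+ // Declared here so the __atomic_base members below can call them; the
+ // definitions appear later in this namespace.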
+ template<typename _Tp>
+ _Tp
+ __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;
+
+ template<typename _Tp>
+ _Tp
+ __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;
+#endif
+ }
+
template<typename _ITp>
struct __atomic_base
{
fetch_xor(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile noexcept
{ return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
+
+#if __glibcxx_atomic_min_max
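+ // C++26 atomic min/max: like the other fetch_* members, these return
+ // the value held before the operation.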
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_min(__int_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_min(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_max(__int_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_max(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }
+#endif
};
return __ptr;
}
- // Remove volatile and create a non-deduced context for value arguments.
- template<typename _Tp>
- using _Val = typename remove_volatile<_Tp>::type;
-
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions"
return __newval;
}
}
+
+#if __glibcxx_atomic_min_max
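+ // Satisfied when the compiler provides __atomic_fetch_min and
+ // __atomic_fetch_max built-ins for _Tp; otherwise __fetch_min and
+ // __fetch_max below fall back to a compare-exchange loop.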
+ template<typename _Tp>
+ concept __atomic_fetch_minmaxable
+ = requires (_Tp __t) {
+ __atomic_fetch_min(&__t, __t, 0);
+ __atomic_fetch_max(&__t, __t, 0);
+ };
+
+ template<typename _Tp>
+ _Tp
+ __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+ {
+ if constexpr (__atomic_fetch_minmaxable<_Tp>)
+ return __atomic_fetch_min(__ptr, __i, int(__m));
+ else
+ {
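+ // No built-in available: emulate with a CAS loop.  On failure,
+ // compare_exchange_weak updates __oldval with the current value,
+ // so recompute the candidate minimum before retrying.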
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval < __i ? __oldval : __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+ memory_order_relaxed))
+ __newval = __oldval < __i ? __oldval : __i;
+ return __oldval;
+ }
+ }
+
+ template<typename _Tp>
+ _Tp
+ __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+ {
+ if constexpr (__atomic_fetch_minmaxable<_Tp>)
+ return __atomic_fetch_max(__ptr, __i, int(__m));
+ else
+ {
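+ // Same CAS-loop fallback as __fetch_min, keeping the maximum instead.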
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval > __i ? __oldval : __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+ memory_order_relaxed))
+ __newval = __oldval > __i ? __oldval : __i;
+ return __oldval;
+ }
+ }
+#endif
} // namespace __atomic_impl
// base class for atomic<floating-point-type>
memory_order __m = memory_order_seq_cst) volatile noexcept
{ return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
+#if __glibcxx_atomic_min_max
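+ // Minimum/maximum for floating-point types reuse the generic helpers,
+ // which fall back to a compare-exchange loop when no built-in is available.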
+ value_type
+ fetch_min(value_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }
+
+ value_type
+ fetch_min(value_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }
+
+ value_type
+ fetch_max(value_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }
+
+ value_type
+ fetch_max(value_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }
+#endif
+
value_type
operator+=(value_type __i) noexcept
{ return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
memory_order __m = memory_order_seq_cst) const noexcept
{ return __atomic_impl::fetch_xor(this->_M_ptr, __i, __m); }
+#if __glibcxx_atomic_min_max
+ value_type
+ fetch_min(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }
+
+ value_type
+ fetch_max(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
+#endif
+
_GLIBCXX_ALWAYS_INLINE value_type
operator++(int) const noexcept
{ return fetch_add(1); }
memory_order __m = memory_order_seq_cst) const noexcept
{ return __atomic_impl::__fetch_sub_flt(this->_M_ptr, __i, __m); }
+#if __glibcxx_atomic_min_max
+ value_type
+ fetch_min(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }
+
+ value_type
+ fetch_max(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
+#endif
+
value_type
operator+=(value_type __i) const noexcept
{ return __atomic_impl::__add_fetch_flt(this->_M_ptr, __i); }
};
};
+ftms = {
+ name = atomic_min_max;
+ values = {
+ v = 202403;
+ cxxmin = 26;
+ };
+};
+
ftms = {
name = atomic_lock_free_type_aliases;
values = {
#endif /* !defined(__cpp_lib_atomic_float) */
#undef __glibcxx_want_atomic_float
+#if !defined(__cpp_lib_atomic_min_max)
+# if (__cplusplus > 202302L)
+# define __glibcxx_atomic_min_max 202403L
+# if defined(__glibcxx_want_all) || defined(__glibcxx_want_atomic_min_max)
+# define __cpp_lib_atomic_min_max 202403L
+# endif
+# endif
+#endif /* !defined(__cpp_lib_atomic_min_max) */
+#undef __glibcxx_want_atomic_min_max
+
#if !defined(__cpp_lib_atomic_lock_free_type_aliases)
# if (__cplusplus >= 202002L) && ((__GCC_ATOMIC_INT_LOCK_FREE | __GCC_ATOMIC_LONG_LOCK_FREE | __GCC_ATOMIC_CHAR_LOCK_FREE) & 2)
# define __glibcxx_atomic_lock_free_type_aliases 201907L
#define __glibcxx_want_atomic_is_always_lock_free
#define __glibcxx_want_atomic_flag_test
#define __glibcxx_want_atomic_float
+#define __glibcxx_want_atomic_min_max
#define __glibcxx_want_atomic_ref
#define __glibcxx_want_atomic_lock_free_type_aliases
#define __glibcxx_want_atomic_value_initialization
memory_order __m) noexcept
{ return __a->fetch_xor(__i, __m); }
+#ifdef __cpp_lib_atomic_min_max
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_min_explicit(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_min(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_min_explicit(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_min(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_max_explicit(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_max(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_max_explicit(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_max(__i, __m); }
+#endif
+
template<typename _ITp>
inline _ITp
atomic_fetch_add(atomic<_ITp>* __a,
__atomic_val_t<_ITp> __i) noexcept
{ return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
+#ifdef __cpp_lib_atomic_min_max
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_min(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_min_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_min(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_min_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_max(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_max_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_max(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_max_explicit(__a, __i, memory_order_seq_cst); }
+#endif
+
#ifdef __cpp_lib_atomic_float
template<>
struct atomic<float> : __atomic_float<float>
using std::atomic_fetch_sub_explicit;
using std::atomic_fetch_xor;
using std::atomic_fetch_xor_explicit;
+#ifdef __cpp_lib_atomic_min_max
+ using std::atomic_fetch_min;
+ using std::atomic_fetch_min_explicit;
+ using std::atomic_fetch_max;
+ using std::atomic_fetch_max_explicit;
+#endif
using std::atomic_flag;
using std::atomic_flag_clear;
using std::atomic_flag_clear_explicit;
--- /dev/null
+// { dg-do compile { target c++26 } }
+// { dg-require-atomic-builtins "" }
+
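+// Check that atomic_fetch_min/max and their _explicit forms are usable with
+// volatile and non-volatile std::atomic objects and return the value type.
+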
+#include <atomic>
+
+void
+test01()
+{
+ volatile std::atomic<int> v;
+ std::atomic<long> a;
+ const std::memory_order mo = std::memory_order_seq_cst;
+ int i = 0;
+ long l = 0;
+
+ auto r1 = atomic_fetch_min(&v, i);
+ static_assert( std::is_same<decltype(r1), int>::value, "" );
+ auto r2 = atomic_fetch_min(&a, l);
+ static_assert( std::is_same<decltype(r2), long>::value, "" );
+ auto r3 = atomic_fetch_min_explicit(&v, i, mo);
+ static_assert( std::is_same<decltype(r3), int>::value, "" );
+ auto r4 = atomic_fetch_min_explicit(&a, l, mo);
+ static_assert( std::is_same<decltype(r4), long>::value, "" );
+
+ auto r5 = atomic_fetch_max(&v, i);
+ static_assert( std::is_same<decltype(r5), int>::value, "" );
+ auto r6 = atomic_fetch_max(&a, l);
+ static_assert( std::is_same<decltype(r6), long>::value, "" );
+ auto r7 = atomic_fetch_max_explicit(&v, i, mo);
+ static_assert( std::is_same<decltype(r7), int>::value, "" );
+ auto r8 = atomic_fetch_max_explicit(&a, l, mo);
+ static_assert( std::is_same<decltype(r8), long>::value, "" );
+}
+
+void
+test02()
+{
+ volatile std::atomic<long> v;
+ std::atomic<long> a;
+ std::memory_order mo = std::memory_order_seq_cst;
+ const int i = 0;
+
+ atomic_fetch_min(&v, i);
+ atomic_fetch_min(&a, i);
+ atomic_fetch_min_explicit(&v, i, mo);
+ atomic_fetch_min_explicit(&a, i, mo);
+ atomic_fetch_max(&v, i);
+ atomic_fetch_max(&a, i);
+ atomic_fetch_max_explicit(&v, i, mo);
+ atomic_fetch_max_explicit(&a, i, mo);
+}
--- /dev/null
+// { dg-do run { target c++26 } }
+// { dg-require-atomic-cmpxchg-word "" }
+// { dg-add-options libatomic }
+
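+// Check the observable behaviour of std::atomic_ref fetch_min and fetch_max.
+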
+#include <atomic>
+#include <limits.h>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ int value;
+ const auto mo = std::memory_order_relaxed;
+
+ {
+ std::atomic_ref<int> a(value);
+
+ a = 100;
+ auto v = a.fetch_min(50);
+ VERIFY( v == 100 );
+ VERIFY( a == 50 );
+
+ v = a.fetch_min(75, mo);
+ VERIFY( v == 50 );
+ VERIFY( a == 50 );
+
+ v = a.fetch_min(25);
+ VERIFY( v == 50 );
+ VERIFY( a == 25 );
+
+ a = -10;
+ v = a.fetch_min(-20);
+ VERIFY( v == -10 );
+ VERIFY( a == -20 );
+
+ v = a.fetch_min(-5, mo);
+ VERIFY( v == -20 );
+ VERIFY( a == -20 );
+
+ a = 20;
+ v = a.fetch_max(50);
+ VERIFY( v == 20 );
+ VERIFY( a == 50 );
+
+ v = a.fetch_max(30, mo);
+ VERIFY( v == 50 );
+ VERIFY( a == 50 );
+
+ v = a.fetch_max(100);
+ VERIFY( v == 50 );
+ VERIFY( a == 100 );
+
+ a = -50;
+ v = a.fetch_max(-20);
+ VERIFY( v == -50 );
+ VERIFY( a == -20 );
+
+ v = a.fetch_max(-30, mo);
+ VERIFY( v == -20 );
+ VERIFY( a == -20 );
+ }
+
+ VERIFY( value == -20 );
+}
+
+void
+test02()
+{
+ unsigned short value;
+ const auto mo = std::memory_order_relaxed;
+
+ {
+ std::atomic_ref<unsigned short> a(value);
+
+ a = 100;
+ auto v = a.fetch_min(50);
+ VERIFY( v == 100 );
+ VERIFY( a == 50 );
+
+ v = a.fetch_min(75, mo);
+ VERIFY( v == 50 );
+ VERIFY( a == 50 );
+
+ a = 20;
+ v = a.fetch_max(50);
+ VERIFY( v == 20 );
+ VERIFY( a == 50 );
+
+ v = a.fetch_max(30, mo);
+ VERIFY( v == 50 );
+ VERIFY( a == 50 );
+
+ v = a.fetch_max(200);
+ VERIFY( v == 50 );
+ VERIFY( a == 200 );
+ }
+
+ VERIFY( value == 200 );
+}
+
+void
+test03()
+{
+ int i = INT_MIN;
+ std::atomic_ref<int> a(i);
+ a.fetch_min(INT_MAX);
+ VERIFY( a == INT_MIN );
+ a.fetch_max(INT_MAX);
+ VERIFY( a == INT_MAX );
+}
+
+int
+main()
+{
+ test01();
+ test02();
+ test03();
+}