From: Adhemerval Zanella
Date: Thu, 13 Nov 2025 17:26:08 +0000 (-0300)
Subject: Revert __HAVE_64B_ATOMICS configure check
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7fec8a5de6826ef9ae440238d698f0fe5a5fb372;p=thirdparty%2Fglibc.git

Revert __HAVE_64B_ATOMICS configure check

Commit 53807741fb44edb8e7c094cb5e7d4ff4e92a6ec1 added a configure check
for 64-bit atomic operations, which had not previously been enabled on
some 32-bit ABIs.  However, the NPTL semaphore code casts a sem_t to a
new_sem and issues 64-bit atomic operations when __HAVE_64B_ATOMICS is
set.  Since sem_t has only 32-bit alignment on 32-bit architectures,
this prevents the use of 64-bit atomics there even if the ABI supports
them.

Instead, derive 64-bit atomic support from __WORDSIZE, which matches
how glibc defined it before the broken change.  Also rename
__HAVE_64B_ATOMICS to USE_64B_ATOMICS to better reflect the flag's
meaning.

Checked on x86_64-linux-gnu and i686-linux-gnu.

Reviewed-by: Wilco Dijkstra
---

diff --git a/config.h.in b/config.h.in
index 4204dbf123..a7cc17df8e 100644
--- a/config.h.in
+++ b/config.h.in
@@ -222,9 +222,6 @@
 /* An integer used to scale the timeout of test programs. */
 #define TIMEOUTFACTOR 1
 
-/* Set to 1 if 64 bit atomics are supported. */
-#undef __HAVE_64B_ATOMICS 0
-
 /* */
diff --git a/configure b/configure
index df51b0c1a3..d1e956cc3d 100755
--- a/configure
+++ b/configure
@@ -7702,48 +7702,6 @@ if test "$libc_cv_gcc_builtin_memset" = yes ; then
 fi
 
-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for 64-bit atomic support" >&5
-printf %s "checking for 64-bit atomic support... " >&6; }
-if test ${libc_cv_gcc_has_64b_atomics+y}
-then :
-  printf %s "(cached) " >&6
-else case e in #(
-  e) cat > conftest.c <<\EOF
-typedef struct { long long t; } X;
-extern void has_64b_atomics(void);
-void f(void)
-{
-  X x;
-  /* Use address of structure with 64-bit type. This avoids incorrect
-     implementations which return true even if long long is not 64-bit aligned.
-     This works on GCC and LLVM - other cases have bugs and they disagree. */
-  _Static_assert (__atomic_always_lock_free (sizeof (x), &x), "no_64b_atomics");
-}
-EOF
-if { ac_try='${CC-cc} -O2 -S conftest.c'
-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; };
-then
-  libc_cv_gcc_has_64b_atomics=yes
-else
-  libc_cv_gcc_has_64b_atomics=no
-fi
-rm -f conftest* ;;
-esac
-fi
-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $libc_cv_gcc_has_64b_atomics" >&5
-printf "%s\n" "$libc_cv_gcc_has_64b_atomics" >&6; }
-if test "$libc_cv_gcc_has_64b_atomics" = yes; then
-  printf "%s\n" "#define __HAVE_64B_ATOMICS 1" >>confdefs.h
-
-else
-  printf "%s\n" "#define __HAVE_64B_ATOMICS 0" >>confdefs.h
-
-  fi
-
 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for redirection of built-in functions" >&5
 printf %s "checking for redirection of built-in functions... " >&6; }
 if test ${libc_cv_gcc_builtin_redirection+y}
diff --git a/configure.ac b/configure.ac
index dd0b7a4c7c..35f69f99c1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1493,33 +1493,6 @@ if test "$libc_cv_gcc_builtin_memset" = yes ; then
   AC_DEFINE(HAVE_BUILTIN_MEMSET)
 fi
 
-AC_CACHE_CHECK(for 64-bit atomic support, libc_cv_gcc_has_64b_atomics, [dnl
-cat > conftest.c <<\EOF
-typedef struct { long long t; } X;
-extern void has_64b_atomics(void);
-void f(void)
-{
-  X x;
-  /* Use address of structure with 64-bit type.
This avoids incorrect - implementations which return true even if long long is not 64-bit aligned. - This works on GCC and LLVM - other cases have bugs and they disagree. */ - _Static_assert (__atomic_always_lock_free (sizeof (x), &x), "no_64b_atomics"); -} -EOF -dnl -if AC_TRY_COMMAND([${CC-cc} -O2 -S conftest.c]); -then - libc_cv_gcc_has_64b_atomics=yes -else - libc_cv_gcc_has_64b_atomics=no -fi -rm -f conftest* ]) -if test "$libc_cv_gcc_has_64b_atomics" = yes; then - AC_DEFINE(__HAVE_64B_ATOMICS, 1) -else - AC_DEFINE(__HAVE_64B_ATOMICS, 0) - fi - AC_CACHE_CHECK(for redirection of built-in functions, libc_cv_gcc_builtin_redirection, [dnl cat > conftest.c <<\EOF extern char *strstr (const char *, const char *) __asm ("my_strstr"); diff --git a/htl/pt-internal.h b/htl/pt-internal.h index c0aa4aa9dc..63dcc0a6ba 100644 --- a/htl/pt-internal.h +++ b/htl/pt-internal.h @@ -343,7 +343,7 @@ libc_hidden_proto (__pthread_default_condattr) See nptl implementation for the details. */ struct new_sem { -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS /* The data field holds both value (in the least-significant 32 bits) and nwaiters. */ # if __BYTE_ORDER == __LITTLE_ENDIAN diff --git a/include/atomic.h b/include/atomic.h index bac7423ec5..ed0dfbde3f 100644 --- a/include/atomic.h +++ b/include/atomic.h @@ -117,11 +117,6 @@ #endif -/* This is equal to 1 iff the architecture supports 64b atomic operations. */ -#ifndef __HAVE_64B_ATOMICS -#error Unable to determine if 64-bit atomics are present. -#endif - /* The following functions are a subset of the atomic operations provided by C11. Usually, a function named atomic_OP_MO(args) is equivalent to C11's atomic_OP_explicit(args, memory_order_MO); exceptions noted below. */ @@ -129,7 +124,7 @@ /* We require 32b atomic operations; some archs also support 64b atomic operations. */ void __atomic_link_error (void); -# if __HAVE_64B_ATOMICS == 1 +# if USE_64B_ATOMICS == 1 # define __atomic_check_size(mem) \ if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \ __atomic_link_error (); @@ -142,7 +137,7 @@ void __atomic_link_error (void); need other atomic operations of such sizes, and restricting the support to loads and stores makes this easier for archs that do not have native support for atomic operations to less-than-word-sized data. 
*/ -# if __HAVE_64B_ATOMICS == 1 +# if USE_64B_ATOMICS == 1 # define __atomic_check_size_ls(mem) \ if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \ && (sizeof (*mem) != 8)) \ diff --git a/include/atomic_wide_counter.h b/include/atomic_wide_counter.h index abf660f7d0..5b1979e601 100644 --- a/include/atomic_wide_counter.h +++ b/include/atomic_wide_counter.h @@ -22,7 +22,7 @@ #include #include -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS static inline uint64_t __atomic_wide_counter_load_relaxed (__atomic_wide_counter *c) @@ -65,7 +65,7 @@ __atomic_wide_counter_fetch_xor_release (__atomic_wide_counter *c, return atomic_fetch_xor_release (&c->__value64, val); } -#else /* !__HAVE_64B_ATOMICS */ +#else /* !USE_64B_ATOMICS */ uint64_t __atomic_wide_counter_load_relaxed (__atomic_wide_counter *c) attribute_hidden; @@ -98,6 +98,6 @@ __atomic_wide_counter_add_relaxed (__atomic_wide_counter *c, __atomic_wide_counter_fetch_add_relaxed (c, val); } -#endif /* !__HAVE_64B_ATOMICS */ +#endif /* !USE_64B_ATOMICS */ #endif /* _ATOMIC_WIDE_COUNTER_H */ diff --git a/misc/atomic_wide_counter.c b/misc/atomic_wide_counter.c index e96e595738..65f4288db2 100644 --- a/misc/atomic_wide_counter.c +++ b/misc/atomic_wide_counter.c @@ -18,7 +18,7 @@ #include -#if !__HAVE_64B_ATOMICS +#if !USE_64B_ATOMICS /* Values we add or xor are less than or equal to 1<<31, so we only have to make overflow-and-addition atomic wrt. to concurrent load @@ -124,4 +124,4 @@ __atomic_wide_counter_load_relaxed (__atomic_wide_counter *c) return ((uint64_t) (h & ~((unsigned int) 1 << 31)) << 31) + l; } -#endif /* !__HAVE_64B_ATOMICS */ +#endif /* !USE_64B_ATOMICS */ diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c index 2708d26295..de3580b56f 100644 --- a/nptl/pthread_cond_common.c +++ b/nptl/pthread_cond_common.c @@ -52,7 +52,7 @@ __condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val) __atomic_wide_counter_add_relaxed (&cond->__data.__g1_start, val); } -#if __HAVE_64B_ATOMICS == 1 +#if USE_64B_ATOMICS == 1 static inline uint64_t __condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val) @@ -60,7 +60,7 @@ __condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val) return atomic_fetch_xor_release (&cond->__data.__wseq.__value64, val); } -#else /* !__HAVE_64B_ATOMICS */ +#else /* !USE_64B_ATOMICS */ /* The xor operation needs to be an atomic read-modify-write. The write itself is not an issue as it affects just the lower-order half but not bits @@ -103,7 +103,7 @@ __condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val) return ((uint64_t) h << 31) + l2; } -#endif /* !__HAVE_64B_ATOMICS */ +#endif /* !USE_64B_ATOMICS */ /* The lock that signalers use. See pthread_cond_wait_common for uses. The lock is our normal three-state lock: not acquired (0) / acquired (1) / diff --git a/nptl/sem_getvalue.c b/nptl/sem_getvalue.c index 53012f785a..80b00be545 100644 --- a/nptl/sem_getvalue.c +++ b/nptl/sem_getvalue.c @@ -33,7 +33,7 @@ __new_sem_getvalue (sem_t *sem, int *sval) necessary, use a stronger MO here and elsewhere (e.g., potentially release MO in all places where we consume a token). 
*/ -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS *sval = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK; #else *sval = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT; diff --git a/nptl/sem_init.c b/nptl/sem_init.c index 76e1aceb70..9cd81803af 100644 --- a/nptl/sem_init.c +++ b/nptl/sem_init.c @@ -38,7 +38,7 @@ __new_sem_init (sem_t *sem, int pshared, unsigned int value) struct new_sem *isem = (struct new_sem *) sem; /* Use the values the caller provided. */ -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS isem->data = value; #else isem->value = value << SEM_VALUE_SHIFT; diff --git a/nptl/sem_post.c b/nptl/sem_post.c index 659e931417..e2845dd1aa 100644 --- a/nptl/sem_post.c +++ b/nptl/sem_post.c @@ -34,7 +34,7 @@ __new_sem_post (sem_t *sem) struct new_sem *isem = (struct new_sem *) sem; int private = isem->private; -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS /* Add a token to the semaphore. We use release MO to make sure that a thread acquiring this token synchronizes with us and other threads that added tokens before (the release sequence includes atomic RMW operations diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c index 95450ee35d..a58df01328 100644 --- a/nptl/sem_waitcommon.c +++ b/nptl/sem_waitcommon.c @@ -77,7 +77,7 @@ requirement because the semaphore must not be destructed while any sem_wait is still executing. */ -#if !__HAVE_64B_ATOMICS +#if !USE_64B_ATOMICS static void __sem_wait_32_finish (struct new_sem *sem); #endif @@ -87,7 +87,7 @@ __sem_wait_cleanup (void *arg) { struct new_sem *sem = (struct new_sem *) arg; -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS /* Stop being registered as a waiter. See below for MO. */ atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT)); #else @@ -107,7 +107,7 @@ do_futex_wait (struct new_sem *sem, clockid_t clockid, { int err; -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS err = __futex_abstimed_wait_cancelable64 ( (unsigned int *) &sem->data + SEM_VALUE_OFFSET, 0, clockid, abstime, @@ -132,7 +132,7 @@ __new_sem_wait_fast (struct new_sem *sem, int definitive_result) synchronize memory); thus, relaxed MO is sufficient for the initial load and the failure path of the CAS. If the weak CAS fails and we need a definitive result, retry. */ -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS uint64_t d = atomic_load_relaxed (&sem->data); do { @@ -166,7 +166,7 @@ __new_sem_wait_slow64 (struct new_sem *sem, clockid_t clockid, { int err = 0; -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS /* Add a waiter. Relaxed MO is sufficient because we can rely on the ordering provided by the RMW operations we use. */ uint64_t d = atomic_fetch_add_relaxed (&sem->data, @@ -280,7 +280,7 @@ __new_sem_wait_slow64 (struct new_sem *sem, clockid_t clockid, /* If there is no token, wait. */ if ((v >> SEM_VALUE_SHIFT) == 0) { - /* See __HAVE_64B_ATOMICS variant. */ + /* See USE_64B_ATOMICS variant. */ err = do_futex_wait (sem, clockid, abstime); if (err == ETIMEDOUT || err == EINTR) { @@ -313,7 +313,7 @@ error: } /* Stop being a registered waiter (non-64b-atomics code only). 
*/ -#if !__HAVE_64B_ATOMICS +#if !USE_64B_ATOMICS static void __sem_wait_32_finish (struct new_sem *sem) { diff --git a/nptl/semaphoreP.h b/nptl/semaphoreP.h index 39e0d152f8..36498529f1 100644 --- a/nptl/semaphoreP.h +++ b/nptl/semaphoreP.h @@ -24,7 +24,7 @@ static inline void __new_sem_open_init (struct new_sem *sem, unsigned value) { -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS sem->data = value; #else sem->value = value << SEM_VALUE_SHIFT; diff --git a/nptl/tst-sem11.c b/nptl/tst-sem11.c index 872cead8b5..7c946c30d1 100644 --- a/nptl/tst-sem11.c +++ b/nptl/tst-sem11.c @@ -52,7 +52,7 @@ do_test (void) puts ("sem_init failed"); return 1; } -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS if ((u.ns.data >> SEM_NWAITERS_SHIFT) != 0) #else if (u.ns.nwaiters != 0) @@ -89,7 +89,7 @@ do_test (void) goto again; } -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS if ((u.ns.data >> SEM_NWAITERS_SHIFT) != 0) #else if (u.ns.nwaiters != 0) diff --git a/nptl/tst-sem13.c b/nptl/tst-sem13.c index d7baa2ae58..17067dd572 100644 --- a/nptl/tst-sem13.c +++ b/nptl/tst-sem13.c @@ -30,7 +30,7 @@ do_test_wait (waitfn_t waitfn, const char *fnname) TEST_VERIFY_EXIT (waitfn (&u.s, &ts) < 0); TEST_COMPARE (errno, EINVAL); -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS unsigned int nwaiters = (u.ns.data >> SEM_NWAITERS_SHIFT); #else unsigned int nwaiters = u.ns.nwaiters; @@ -42,7 +42,7 @@ do_test_wait (waitfn_t waitfn, const char *fnname) errno = 0; TEST_VERIFY_EXIT (waitfn (&u.s, &ts) < 0); TEST_COMPARE (errno, ETIMEDOUT); -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS nwaiters = (u.ns.data >> SEM_NWAITERS_SHIFT); #else nwaiters = u.ns.nwaiters; diff --git a/stdlib/setenv.h b/stdlib/setenv.h index 7cbf9f2059..aed97efd4e 100644 --- a/stdlib/setenv.h +++ b/stdlib/setenv.h @@ -61,7 +61,7 @@ __environ_is_from_array_list (char **ep) but given that counter wrapround is probably impossible to hit (2**32 operations in unsetenv concurrently with getenv), using seems unnecessary. */ -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS typedef uint64_t environ_counter; #else typedef uint32_t environ_counter; diff --git a/sysdeps/alpha/atomic-machine.h b/sysdeps/alpha/atomic-machine.h index 9aec231748..b7ce8c4726 100644 --- a/sysdeps/alpha/atomic-machine.h +++ b/sysdeps/alpha/atomic-machine.h @@ -15,6 +15,11 @@ License along with the GNU C Library. If not, see . */ -#include +#ifndef _ALPHA_ATOMIC_MACHINE_H +#define _ALPHA_ATOMIC_MACHINE_H + +#include_next #define atomic_write_barrier() __asm ("wmb" : : : "memory"); + +#endif diff --git a/sysdeps/generic/atomic-machine.h b/sysdeps/generic/atomic-machine.h index 9f8528c1f2..ef72d9ba0d 100644 --- a/sysdeps/generic/atomic-machine.h +++ b/sysdeps/generic/atomic-machine.h @@ -34,4 +34,16 @@ and adaptive mutexes to optimize spin-wait loops. */ +#include + +/* NB: The NPTL semaphore code casts a sem_t to a new_sem and issues a 64-bit + atomic operation for USE_64B_ATOMICS. However, the sem_t has 32-bit + alignment on 32-bit architectures, which prevents using 64-bit atomics even + if the ABI supports it. 
*/ +#if __WORDSIZE == 64 +# define USE_64B_ATOMICS 1 +#else +# define USE_64B_ATOMICS 0 +#endif + #endif /* atomic-machine.h */ diff --git a/sysdeps/htl/sem-destroy.c b/sysdeps/htl/sem-destroy.c index daecf1a7e4..dbf61b885e 100644 --- a/sysdeps/htl/sem-destroy.c +++ b/sysdeps/htl/sem-destroy.c @@ -28,7 +28,7 @@ __sem_destroy (sem_t *sem) { struct new_sem *isem = (struct new_sem *) sem; if ( -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS atomic_load_relaxed (&isem->data) >> SEM_NWAITERS_SHIFT #else atomic_load_relaxed (&isem->value) & SEM_NWAITERS_MASK diff --git a/sysdeps/htl/sem-getvalue.c b/sysdeps/htl/sem-getvalue.c index 799ddacc9b..73bb96576d 100644 --- a/sysdeps/htl/sem-getvalue.c +++ b/sysdeps/htl/sem-getvalue.c @@ -25,7 +25,7 @@ __sem_getvalue (sem_t *restrict sem, int *restrict value) { struct new_sem *isem = (struct new_sem *) sem; -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS *value = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK; #else *value = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT; diff --git a/sysdeps/htl/sem-post.c b/sysdeps/htl/sem-post.c index 419a5ec2c4..f42fb28670 100644 --- a/sysdeps/htl/sem-post.c +++ b/sysdeps/htl/sem-post.c @@ -31,7 +31,7 @@ __sem_post (sem_t *sem) struct new_sem *isem = (struct new_sem *) sem; int flags = isem->pshared ? GSYNC_SHARED : 0; -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS uint64_t d = atomic_load_relaxed (&isem->data); do diff --git a/sysdeps/htl/sem-timedwait.c b/sysdeps/htl/sem-timedwait.c index 8f2b4d3f8b..755d502f8c 100644 --- a/sysdeps/htl/sem-timedwait.c +++ b/sysdeps/htl/sem-timedwait.c @@ -27,7 +27,7 @@ #include #include -#if !__HAVE_64B_ATOMICS +#if !USE_64B_ATOMICS static void __sem_wait_32_finish (struct new_sem *isem); #endif @@ -37,7 +37,7 @@ __sem_wait_cleanup (void *arg) { struct new_sem *isem = arg; -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS atomic_fetch_add_relaxed (&isem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT)); #else __sem_wait_32_finish (isem); @@ -60,7 +60,7 @@ __sem_timedwait_internal (sem_t *restrict sem, int cancel_oldtype = LIBC_CANCEL_ASYNC(); -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS uint64_t d = atomic_fetch_add_relaxed (&isem->data, (uint64_t) 1 << SEM_NWAITERS_SHIFT); @@ -170,7 +170,7 @@ error: return ret; } -#if !__HAVE_64B_ATOMICS +#if !USE_64B_ATOMICS /* Stop being a registered waiter (non-64b-atomics code only). */ static void __sem_wait_32_finish (struct new_sem *isem) diff --git a/sysdeps/htl/sem-waitfast.c b/sysdeps/htl/sem-waitfast.c index 61bb1db6bf..3ee4d417c8 100644 --- a/sysdeps/htl/sem-waitfast.c +++ b/sysdeps/htl/sem-waitfast.c @@ -24,7 +24,7 @@ int __sem_waitfast (struct new_sem *isem, int definitive_result) { -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS uint64_t d = atomic_load_relaxed (&isem->data); do diff --git a/sysdeps/nptl/internaltypes.h b/sysdeps/nptl/internaltypes.h index efda6ea060..73ddd44872 100644 --- a/sysdeps/nptl/internaltypes.h +++ b/sysdeps/nptl/internaltypes.h @@ -159,7 +159,7 @@ struct pthread_key_struct /* Semaphore variable structure. */ struct new_sem { -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS /* The data field holds both value (in the least-significant 32 bits) and nwaiters. */ # if __BYTE_ORDER == __LITTLE_ENDIAN diff --git a/sysdeps/nptl/rseq-access.h b/sysdeps/nptl/rseq-access.h index 450f2dcca3..451cbf2743 100644 --- a/sysdeps/nptl/rseq-access.h +++ b/sysdeps/nptl/rseq-access.h @@ -28,7 +28,7 @@ /* Static assert for types that can't be loaded/stored atomically on the current architecture. 
*/ -#if __HAVE_64B_ATOMICS +#if USE_64B_ATOMICS #define __RSEQ_ASSERT_ATOMIC(member) \ _Static_assert (sizeof (RSEQ_SELF()->member) == 1 \ || sizeof (RSEQ_SELF()->member) == 4 \ diff --git a/sysdeps/riscv/atomic-machine.h b/sysdeps/riscv/atomic-machine.h index b6494b3c83..e375c6665e 100644 --- a/sysdeps/riscv/atomic-machine.h +++ b/sysdeps/riscv/atomic-machine.h @@ -21,6 +21,8 @@ #ifdef __riscv_atomic +#include_next + /* Miscellaneous. */ # define asm_amo(which, ordering, mem, value) ({ \ diff --git a/sysdeps/sparc/atomic-machine.h b/sysdeps/sparc/atomic-machine.h index b8c1e96eb4..2ffcbb2fcf 100644 --- a/sysdeps/sparc/atomic-machine.h +++ b/sysdeps/sparc/atomic-machine.h @@ -16,8 +16,10 @@ License along with the GNU C Library; if not, see . */ -#ifndef _ATOMIC_MACHINE_H -#define _ATOMIC_MACHINE_H 1 +#ifndef _SPARC_ATOMIC_MACHINE_H +#define _SPARC_ATOMIC_MACHINE_H 1 + +#include_next #ifdef __sparc_v9__ # define atomic_full_barrier() \ diff --git a/sysdeps/x86/atomic-machine.h b/sysdeps/x86/atomic-machine.h index 0681f57987..cfa6b62d32 100644 --- a/sysdeps/x86/atomic-machine.h +++ b/sysdeps/x86/atomic-machine.h @@ -19,6 +19,12 @@ #ifndef _X86_ATOMIC_MACHINE_H #define _X86_ATOMIC_MACHINE_H 1 +#ifdef __x86_64__ +# define USE_64B_ATOMICS 1 +#else +# define USE_64B_ATOMICS 0 +#endif + #define atomic_spin_nop() __asm ("pause") #endif /* atomic-machine.h */
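
Editorial note (not part of the patch): the following standalone C sketch
illustrates the alignment problem described in the commit message.  It
compares the alignment sem_t guarantees with the natural 8-byte alignment a
64-bit atomic operation needs, alongside what a probe like the removed
configure check reports for a well-aligned object.  The variable names and
messages are made up for this example, and the exact output depends on the
target and -march settings; on an i686 build it typically shows the probe
succeeding while sem_t is only 4-byte aligned, which is why the patch derives
USE_64B_ATOMICS from __WORDSIZE instead.

/* Minimal sketch, not part of the glibc patch: shows why a configure probe
   on a well-aligned 64-bit object cannot decide whether the new_sem overlay
   of sem_t may use 64-bit atomics.  */
#include <semaphore.h>
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* What the removed configure check effectively asked: are 8-byte atomics
     always lock-free for an object with typical (natural) alignment?  On
     i686 with cmpxchg8b available this is usually true.  */
  int abi_has_64b_atomics
    = __atomic_always_lock_free (sizeof (uint64_t), (void *) 0);

  printf ("8-byte atomics lock-free for naturally aligned objects: %d\n",
          abi_has_64b_atomics);
  printf ("_Alignof (sem_t)  = %zu\n", (size_t) alignof (sem_t));
  printf ("sizeof (uint64_t) = %zu\n", sizeof (uint64_t));

  /* The 64-bit new_sem layout overlays a 64-bit counter on sem_t, so the
     counter only gets sem_t's alignment.  On 32-bit glibc that is
     _Alignof (long) == 4, below the natural 8-byte alignment of the
     counter, so 64-bit atomics cannot be used for the semaphore there.  */
  if (alignof (sem_t) < sizeof (uint64_t))
    puts ("sem_t is under-aligned for a 64-bit counter;"
          " the semaphore cannot use 64-bit atomics");
  else
    puts ("sem_t alignment is sufficient for the 64-bit semaphore layout");

  return 0;
}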