/* An integer used to scale the timeout of test programs. */
#define TIMEOUTFACTOR 1
-/* Set to 1 if 64 bit atomics are supported. */
-#undef __HAVE_64B_ATOMICS 0
-
/*
\f */
fi
-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for 64-bit atomic support" >&5
-printf %s "checking for 64-bit atomic support... " >&6; }
-if test ${libc_cv_gcc_has_64b_atomics+y}
-then :
- printf %s "(cached) " >&6
-else case e in #(
- e) cat > conftest.c <<\EOF
-typedef struct { long long t; } X;
-extern void has_64b_atomics(void);
-void f(void)
-{
- X x;
- /* Use address of structure with 64-bit type. This avoids incorrect
- implementations which return true even if long long is not 64-bit aligned.
- This works on GCC and LLVM - other cases have bugs and they disagree. */
- _Static_assert (__atomic_always_lock_free (sizeof (x), &x), "no_64b_atomics");
-}
-EOF
-if { ac_try='${CC-cc} -O2 -S conftest.c'
- { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
- (eval $ac_try) 2>&5
- ac_status=$?
- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; }; };
-then
- libc_cv_gcc_has_64b_atomics=yes
-else
- libc_cv_gcc_has_64b_atomics=no
-fi
-rm -f conftest* ;;
-esac
-fi
-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $libc_cv_gcc_has_64b_atomics" >&5
-printf "%s\n" "$libc_cv_gcc_has_64b_atomics" >&6; }
-if test "$libc_cv_gcc_has_64b_atomics" = yes; then
- printf "%s\n" "#define __HAVE_64B_ATOMICS 1" >>confdefs.h
-
-else
- printf "%s\n" "#define __HAVE_64B_ATOMICS 0" >>confdefs.h
-
- fi
-
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for redirection of built-in functions" >&5
printf %s "checking for redirection of built-in functions... " >&6; }
if test ${libc_cv_gcc_builtin_redirection+y}
AC_DEFINE(HAVE_BUILTIN_MEMSET)
fi
-AC_CACHE_CHECK(for 64-bit atomic support, libc_cv_gcc_has_64b_atomics, [dnl
-cat > conftest.c <<\EOF
-typedef struct { long long t; } X;
-extern void has_64b_atomics(void);
-void f(void)
-{
- X x;
- /* Use address of structure with 64-bit type. This avoids incorrect
- implementations which return true even if long long is not 64-bit aligned.
- This works on GCC and LLVM - other cases have bugs and they disagree. */
- _Static_assert (__atomic_always_lock_free (sizeof (x), &x), "no_64b_atomics");
-}
-EOF
-dnl
-if AC_TRY_COMMAND([${CC-cc} -O2 -S conftest.c]);
-then
- libc_cv_gcc_has_64b_atomics=yes
-else
- libc_cv_gcc_has_64b_atomics=no
-fi
-rm -f conftest* ])
-if test "$libc_cv_gcc_has_64b_atomics" = yes; then
- AC_DEFINE(__HAVE_64B_ATOMICS, 1)
-else
- AC_DEFINE(__HAVE_64B_ATOMICS, 0)
- fi
-
AC_CACHE_CHECK(for redirection of built-in functions, libc_cv_gcc_builtin_redirection, [dnl
cat > conftest.c <<\EOF
extern char *strstr (const char *, const char *) __asm ("my_strstr");
See nptl implementation for the details. */
struct new_sem
{
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
/* The data field holds both value (in the least-significant 32 bits) and
nwaiters. */
# if __BYTE_ORDER == __LITTLE_ENDIAN
#endif
-/* This is equal to 1 iff the architecture supports 64b atomic operations. */
-#ifndef __HAVE_64B_ATOMICS
-#error Unable to determine if 64-bit atomics are present.
-#endif
-
/* The following functions are a subset of the atomic operations provided by
C11. Usually, a function named atomic_OP_MO(args) is equivalent to C11's
atomic_OP_explicit(args, memory_order_MO); exceptions noted below. */
/* We require 32b atomic operations; some archs also support 64b atomic
operations. */
void __atomic_link_error (void);
-# if __HAVE_64B_ATOMICS == 1
+# if USE_64B_ATOMICS == 1
# define __atomic_check_size(mem) \
if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
__atomic_link_error ();
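As an aside, the link-error idiom behind __atomic_check_size can be reproduced in a stand-alone sketch; the example_* names below are made up for illustration, and the idiom assumes the compiler folds the constant sizeof test so the dead call disappears (glibc is always built with optimization):

/* Deliberately never defined; if a call survives into the object file,
   the link fails.  */
extern void example_link_error (void);

/* For supported sizes the condition is a compile-time constant zero, the
   call is discarded, and the undefined symbol is never referenced.  */
#define example_check_size(mem)                                \
  do                                                           \
    {                                                          \
      if (sizeof (*(mem)) != 4 && sizeof (*(mem)) != 8)        \
        example_link_error ();                                 \
    }                                                          \
  while (0)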
need other atomic operations of such sizes, and restricting the support to
loads and stores makes this easier for archs that do not have native
support for atomic operations to less-than-word-sized data. */
-# if __HAVE_64B_ATOMICS == 1
+# if USE_64B_ATOMICS == 1
# define __atomic_check_size_ls(mem) \
if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
&& (sizeof (*mem) != 8)) \
#include <atomic.h>
#include <bits/atomic_wide_counter.h>
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
static inline uint64_t
__atomic_wide_counter_load_relaxed (__atomic_wide_counter *c)
return atomic_fetch_xor_release (&c->__value64, val);
}
-#else /* !__HAVE_64B_ATOMICS */
+#else /* !USE_64B_ATOMICS */
uint64_t __atomic_wide_counter_load_relaxed (__atomic_wide_counter *c)
attribute_hidden;
__atomic_wide_counter_fetch_add_relaxed (c, val);
}
-#endif /* !__HAVE_64B_ATOMICS */
+#endif /* !USE_64B_ATOMICS */
#endif /* _ATOMIC_WIDE_COUNTER_H */
#include <atomic_wide_counter.h>
-#if !__HAVE_64B_ATOMICS
+#if !USE_64B_ATOMICS
/* Values we add or xor are less than or equal to 1<<31, so we only
have to make overflow-and-addition atomic wrt. to concurrent load
return ((uint64_t) (h & ~((unsigned int) 1 << 31)) << 31) + l;
}
-#endif /* !__HAVE_64B_ATOMICS */
+#endif /* !USE_64B_ATOMICS */
__atomic_wide_counter_add_relaxed (&cond->__data.__g1_start, val);
}
-#if __HAVE_64B_ATOMICS == 1
+#if USE_64B_ATOMICS == 1
static inline uint64_t
__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val)
return atomic_fetch_xor_release (&cond->__data.__wseq.__value64, val);
}
-#else /* !__HAVE_64B_ATOMICS */
+#else /* !USE_64B_ATOMICS */
/* The xor operation needs to be an atomic read-modify-write. The write
itself is not an issue as it affects just the lower-order half but not bits
return ((uint64_t) h << 31) + l2;
}
-#endif /* !__HAVE_64B_ATOMICS */
+#endif /* !USE_64B_ATOMICS */
/* The lock that signalers use. See pthread_cond_wait_common for uses.
The lock is our normal three-state lock: not acquired (0) / acquired (1) /
necessary, use a stronger MO here and elsewhere (e.g., potentially
release MO in all places where we consume a token). */
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
*sval = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK;
#else
*sval = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT;
struct new_sem *isem = (struct new_sem *) sem;
/* Use the values the caller provided. */
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
isem->data = value;
#else
isem->value = value << SEM_VALUE_SHIFT;
struct new_sem *isem = (struct new_sem *) sem;
int private = isem->private;
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
/* Add a token to the semaphore. We use release MO to make sure that a
thread acquiring this token synchronizes with us and other threads that
added tokens before (the release sequence includes atomic RMW operations
requirement because the semaphore must not be destructed while any sem_wait
is still executing. */
-#if !__HAVE_64B_ATOMICS
+#if !USE_64B_ATOMICS
static void
__sem_wait_32_finish (struct new_sem *sem);
#endif
{
struct new_sem *sem = (struct new_sem *) arg;
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
/* Stop being registered as a waiter. See below for MO. */
atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
#else
{
int err;
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
err = __futex_abstimed_wait_cancelable64 (
(unsigned int *) &sem->data + SEM_VALUE_OFFSET, 0,
clockid, abstime,
synchronize memory); thus, relaxed MO is sufficient for the initial load
and the failure path of the CAS. If the weak CAS fails and we need a
definitive result, retry. */
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
uint64_t d = atomic_load_relaxed (&sem->data);
do
{
{
int err = 0;
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
/* Add a waiter. Relaxed MO is sufficient because we can rely on the
ordering provided by the RMW operations we use. */
uint64_t d = atomic_fetch_add_relaxed (&sem->data,
/* If there is no token, wait. */
if ((v >> SEM_VALUE_SHIFT) == 0)
{
- /* See __HAVE_64B_ATOMICS variant. */
+ /* See USE_64B_ATOMICS variant. */
err = do_futex_wait (sem, clockid, abstime);
if (err == ETIMEDOUT || err == EINTR)
{
}
/* Stop being a registered waiter (non-64b-atomics code only). */
-#if !__HAVE_64B_ATOMICS
+#if !USE_64B_ATOMICS
static void
__sem_wait_32_finish (struct new_sem *sem)
{
static inline void __new_sem_open_init (struct new_sem *sem, unsigned value)
{
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
sem->data = value;
#else
sem->value = value << SEM_VALUE_SHIFT;
puts ("sem_init failed");
return 1;
}
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
if ((u.ns.data >> SEM_NWAITERS_SHIFT) != 0)
#else
if (u.ns.nwaiters != 0)
goto again;
}
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
if ((u.ns.data >> SEM_NWAITERS_SHIFT) != 0)
#else
if (u.ns.nwaiters != 0)
TEST_VERIFY_EXIT (waitfn (&u.s, &ts) < 0);
TEST_COMPARE (errno, EINVAL);
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
unsigned int nwaiters = (u.ns.data >> SEM_NWAITERS_SHIFT);
#else
unsigned int nwaiters = u.ns.nwaiters;
errno = 0;
TEST_VERIFY_EXIT (waitfn (&u.s, &ts) < 0);
TEST_COMPARE (errno, ETIMEDOUT);
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
nwaiters = (u.ns.data >> SEM_NWAITERS_SHIFT);
#else
nwaiters = u.ns.nwaiters;
but given that counter wrapround is probably impossible to hit
(2**32 operations in unsetenv concurrently with getenv), using
<atomic_wide_counter.h> seems unnecessary. */
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
typedef uint64_t environ_counter;
#else
typedef uint32_t environ_counter;
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
-#include <stdint.h>
+#ifndef _ALPHA_ATOMIC_MACHINE_H
+#define _ALPHA_ATOMIC_MACHINE_H
+
+#include_next <atomic-machine.h>
#define atomic_write_barrier() __asm ("wmb" : : : "memory");
+
+#endif
and adaptive mutexes to optimize spin-wait loops.
*/
+#include <bits/wordsize.h>
+
+/* NB: The NPTL semaphore code casts a sem_t to a struct new_sem and issues
+   64-bit atomic operations on it when USE_64B_ATOMICS is set.  However,
+   sem_t only has 32-bit alignment on 32-bit architectures, which prevents
+   using 64-bit atomics even if the ABI otherwise supports them.  */
+#if __WORDSIZE == 64
+# define USE_64B_ATOMICS 1
+#else
+# define USE_64B_ATOMICS 0
+#endif
+
#endif /* atomic-machine.h */
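The alignment caveat stated in the NB comment above can be demonstrated with a small stand-alone program; demo_sem is a hypothetical stand-in for struct new_sem, not part of the patch.  On common 32-bit ABIs such as i386, a 64-bit member only gives the enclosing struct 4-byte alignment, so a sem_t-sized object cannot be assumed to meet the 8-byte alignment that 64-bit atomics need:

#include <stdalign.h>
#include <stdio.h>

struct demo_sem { long long data; };  /* stand-in for struct new_sem */

int
main (void)
{
  /* Typically prints 4 with gcc -m32 (i386 ABI) and 8 on x86_64.  */
  printf ("alignof (struct demo_sem) = %zu\n", alignof (struct demo_sem));
  return 0;
}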
{
struct new_sem *isem = (struct new_sem *) sem;
if (
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
atomic_load_relaxed (&isem->data) >> SEM_NWAITERS_SHIFT
#else
atomic_load_relaxed (&isem->value) & SEM_NWAITERS_MASK
{
struct new_sem *isem = (struct new_sem *) sem;
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
*value = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK;
#else
*value = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT;
struct new_sem *isem = (struct new_sem *) sem;
int flags = isem->pshared ? GSYNC_SHARED : 0;
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
uint64_t d = atomic_load_relaxed (&isem->data);
do
#include <pt-internal.h>
#include <shlib-compat.h>
-#if !__HAVE_64B_ATOMICS
+#if !USE_64B_ATOMICS
static void
__sem_wait_32_finish (struct new_sem *isem);
#endif
{
struct new_sem *isem = arg;
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
atomic_fetch_add_relaxed (&isem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
#else
__sem_wait_32_finish (isem);
int cancel_oldtype = LIBC_CANCEL_ASYNC();
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
uint64_t d = atomic_fetch_add_relaxed (&isem->data,
(uint64_t) 1 << SEM_NWAITERS_SHIFT);
return ret;
}
-#if !__HAVE_64B_ATOMICS
+#if !USE_64B_ATOMICS
/* Stop being a registered waiter (non-64b-atomics code only). */
static void
__sem_wait_32_finish (struct new_sem *isem)
int
__sem_waitfast (struct new_sem *isem, int definitive_result)
{
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
uint64_t d = atomic_load_relaxed (&isem->data);
do
/* Semaphore variable structure. */
struct new_sem
{
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
/* The data field holds both value (in the least-significant 32 bits) and
nwaiters. */
# if __BYTE_ORDER == __LITTLE_ENDIAN
/* Static assert for types that can't be loaded/stored atomically on the
current architecture. */
-#if __HAVE_64B_ATOMICS
+#if USE_64B_ATOMICS
#define __RSEQ_ASSERT_ATOMIC(member) \
_Static_assert (sizeof (RSEQ_SELF()->member) == 1 \
|| sizeof (RSEQ_SELF()->member) == 4 \
#ifdef __riscv_atomic
+#include_next <atomic-machine.h>
+
/* Miscellaneous. */
# define asm_amo(which, ordering, mem, value) ({ \
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
-#ifndef _ATOMIC_MACHINE_H
-#define _ATOMIC_MACHINE_H 1
+#ifndef _SPARC_ATOMIC_MACHINE_H
+#define _SPARC_ATOMIC_MACHINE_H 1
+
+#include_next <atomic-machine.h>
#ifdef __sparc_v9__
# define atomic_full_barrier() \
#ifndef _X86_ATOMIC_MACHINE_H
#define _X86_ATOMIC_MACHINE_H 1
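+/* 64-bit atomics are used only on x86_64; on 32-bit x86, objects such as
+   sem_t lack the 8-byte alignment they would require (see the NB comment in
+   the generic atomic-machine.h).  */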
+#ifdef __x86_64__
+# define USE_64B_ATOMICS 1
+#else
+# define USE_64B_ATOMICS 0
+#endif
+
#define atomic_spin_nop() __asm ("pause")
#endif /* atomic-machine.h */