/* Define if __builtin_fmod/__builtin_remainder is inlined on x86. */
#undef HAVE_X86_INLINE_FMOD
+/* Define to 1 if 64-bit atomics are supported, 0 otherwise. */
+#undef HAVE_64B_ATOMICS
+
/*
\f */
fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for 64-bit atomic support" >&5
+printf %s "checking for 64-bit atomic support... " >&6; }
+if test ${libc_cv_gcc_has_64b_atomics+y}
+then :
+ printf %s "(cached) " >&6
+else case e in #(
+ e) cat > conftest.c <<\EOF
+typedef struct { long long t; } X;
+extern void has_64b_atomics(void);
+void f(void)
+{
+ X x;
+  /* Pass the address of a structure containing a 64-bit type.  This avoids
+     incorrect results from implementations that return true even when
+     long long is not 64-bit aligned.  GCC and LLVM handle this form
+     correctly; other forms are buggy and the two compilers disagree. */
+ _Static_assert (__atomic_always_lock_free (sizeof (x), &x), "no_64b_atomics");
+}
+EOF
+if { ac_try='${CC-cc} -O2 -S conftest.c'
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; };
+then
+ libc_cv_gcc_has_64b_atomics=yes
+else
+ libc_cv_gcc_has_64b_atomics=no
+fi
+rm -f conftest* ;;
+esac
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $libc_cv_gcc_has_64b_atomics" >&5
+printf "%s\n" "$libc_cv_gcc_has_64b_atomics" >&6; }
+if test "$libc_cv_gcc_has_64b_atomics" = yes; then
+ printf "%s\n" "#define HAVE_64B_ATOMICS 1" >>confdefs.h
+
+else
+ printf "%s\n" "#define HAVE_64B_ATOMICS 0" >>confdefs.h
+
+fi
+
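A standalone illustration of what the conftest above checks (not part of the
patch; compile with GCC or Clang): on a target where long long is only 4-byte
aligned inside a struct, such as i386, the size-only and address-based forms
of __atomic_always_lock_free can give different answers, which is why the
test passes &x rather than 0.

/* Hypothetical probe, for illustration only.  */
#include <stdio.h>

typedef struct { long long t; } X;

int
main (void)
{
  X x;
  printf ("size only:    %d\n", (int) __atomic_always_lock_free (sizeof (x), 0));
  printf ("with address: %d\n", (int) __atomic_always_lock_free (sizeof (x), &x));
  return 0;
}
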
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for compiler option to disable generation of FMA instructions" >&5
printf %s "checking for compiler option to disable generation of FMA instructions... " >&6; }
if test ${libc_cv_cc_nofma+y}
AC_DEFINE(HAVE_BUILTIN_MEMSET)
fi
+AC_CACHE_CHECK(for 64-bit atomic support, libc_cv_gcc_has_64b_atomics, [dnl
+cat > conftest.c <<\EOF
+typedef struct { long long t; } X;
+extern void has_64b_atomics(void);
+void f(void)
+{
+ X x;
+  /* Pass the address of a structure containing a 64-bit type.  This avoids
+     incorrect results from implementations that return true even when
+     long long is not 64-bit aligned.  GCC and LLVM handle this form
+     correctly; other forms are buggy and the two compilers disagree. */
+ _Static_assert (__atomic_always_lock_free (sizeof (x), &x), "no_64b_atomics");
+}
+EOF
+dnl
+if AC_TRY_COMMAND([${CC-cc} -O2 -S conftest.c]);
+then
+ libc_cv_gcc_has_64b_atomics=yes
+else
+ libc_cv_gcc_has_64b_atomics=no
+fi
+rm -f conftest* ])
+if test "$libc_cv_gcc_has_64b_atomics" = yes; then
+ AC_DEFINE(HAVE_64B_ATOMICS, 1)
+else
+ AC_DEFINE(HAVE_64B_ATOMICS, 0)
+fi
+
dnl Determine how to disable generation of FMA instructions.
AC_CACHE_CHECK([for compiler option to disable generation of FMA instructions],
libc_cv_cc_nofma, [dnl
- support functions like barriers. They also have the prefix
"atomic_".
- Architectures must provide a few lowlevel macros (the compare
- and exchange definitions). All others are optional. They
- should only be provided if the architecture has specific
- support for the operation.
-
As <atomic.h> macros are usually heavily nested and often use local
variables to make sure side-effects are evaluated properly, use for
macro local variables a per-macro unique prefix. This file uses
#include <atomic-machine.h>
-# undef atomic_compare_and_exchange_val_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
({ \
__typeof (*(mem)) __atg3_old = (oldval); \
__atg3_old; \
})
-# undef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
({ \
__typeof (*(mem)) __atg3_old = (oldval); \
__atg3_old; \
})
-# undef atomic_compare_and_exchange_bool_acq
# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
({ \
__typeof (*(mem)) __atg3_old = (oldval); \
C11. Usually, a function named atomic_OP_MO(args) is equivalent to C11's
atomic_OP_explicit(args, memory_order_MO); exceptions noted below. */
-/* We require 32b atomic operations; some archs also support 64b atomic
- operations. */
-void __atomic_link_error (void);
-# if USE_64B_ATOMICS == 1
-# define __atomic_check_size(mem) \
- if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
- __atomic_link_error ();
-# else
-# define __atomic_check_size(mem) \
- if (sizeof (*mem) != 4) \
- __atomic_link_error ();
-# endif
-/* We additionally provide 8b and 16b atomic loads and stores; we do not yet
- need other atomic operations of such sizes, and restricting the support to
- loads and stores makes this easier for archs that do not have native
- support for atomic operations to less-than-word-sized data. */
-# if USE_64B_ATOMICS == 1
-# define __atomic_check_size_ls(mem) \
- if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
- && (sizeof (*mem) != 8)) \
- __atomic_link_error ();
-# else
-# define __atomic_check_size_ls(mem) \
- if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && sizeof (*mem) != 4) \
- __atomic_link_error ();
-# endif
-
-# define atomic_thread_fence_acquire() \
- __atomic_thread_fence (__ATOMIC_ACQUIRE)
-# define atomic_thread_fence_release() \
- __atomic_thread_fence (__ATOMIC_RELEASE)
-# define atomic_thread_fence_seq_cst() \
- __atomic_thread_fence (__ATOMIC_SEQ_CST)
+/* Check that atomic operations are lock-free.  Since
+   __atomic_always_lock_free does not give the right answer on all targets
+   (e.g. if uint64_t is only 4-byte aligned), use HAVE_64B_ATOMICS to gate
+   64-bit types. */
+#define __atomic_check_size(mem) \
+ _Static_assert (__atomic_always_lock_free (sizeof (*(mem)), 0) && \
+ !(sizeof (*(mem)) == 8 && HAVE_64B_ATOMICS == 0), \
+ "atomic not lock free!")
+
+#define atomic_thread_fence_acquire() __atomic_thread_fence (__ATOMIC_ACQUIRE)
+#define atomic_thread_fence_release() __atomic_thread_fence (__ATOMIC_RELEASE)
+#define atomic_thread_fence_seq_cst() __atomic_thread_fence (__ATOMIC_SEQ_CST)
# define atomic_load_relaxed(mem) \
- ({ __atomic_check_size_ls((mem)); \
+ ({ __atomic_check_size((mem)); \
__atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
- ({ __atomic_check_size_ls((mem)); \
+ ({ __atomic_check_size((mem)); \
__atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
# define atomic_store_relaxed(mem, val) \
do { \
- __atomic_check_size_ls((mem)); \
+ __atomic_check_size((mem)); \
__atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
} while (0)
# define atomic_store_release(mem, val) \
do { \
- __atomic_check_size_ls((mem)); \
+ __atomic_check_size((mem)); \
__atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
} while (0)
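
A minimal usage sketch of the reworked check (illustrative only; it assumes a
glibc build environment where the internal <atomic.h> and the HAVE_64B_ATOMICS
define from this patch are visible, and the function names are hypothetical):
unsupported sizes are now rejected at compile time by the _Static_assert in
__atomic_check_size instead of producing an undefined reference to
__atomic_link_error at link time.

#include <atomic.h>
#include <stdint.h>

/* 32-bit atomics are required on every target, so this always compiles.  */
uint32_t
load32 (uint32_t *p)
{
  return atomic_load_relaxed (p);
}

/* 64-bit accesses are only accepted when HAVE_64B_ATOMICS is 1; on other
   targets the "atomic not lock free!" static assertion fires.  */
#if HAVE_64B_ATOMICS
uint64_t
load64 (uint64_t *p)
{
  return atomic_load_relaxed (p);
}
#endif
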
#include <atomic.h>
#include <bits/atomic_wide_counter.h>
-#if USE_64B_ATOMICS
+#if HAVE_64B_ATOMICS
static inline uint64_t
__atomic_wide_counter_load_relaxed (__atomic_wide_counter *c)
return atomic_fetch_xor_release (&c->__value64, val);
}
-#else /* !USE_64B_ATOMICS */
+#else /* !HAVE_64B_ATOMICS */
uint64_t __atomic_wide_counter_load_relaxed (__atomic_wide_counter *c)
attribute_hidden;
__atomic_wide_counter_fetch_add_relaxed (c, val);
}
-#endif /* !USE_64B_ATOMICS */
+#endif /* !HAVE_64B_ATOMICS */
#endif /* _ATOMIC_WIDE_COUNTER_H */
#include <atomic_wide_counter.h>
-#if !USE_64B_ATOMICS
+#if !HAVE_64B_ATOMICS
/* Values we add or xor are less than or equal to 1<<31, so we only
have to make overflow-and-addition atomic wrt. to concurrent load
return ((uint64_t) (h & ~((unsigned int) 1 << 31)) << 31) + l;
}
-#endif /* !USE_64B_ATOMICS */
+#endif /* !HAVE_64B_ATOMICS */
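
A small arithmetic sketch of the reconstruction used above (illustrative only;
it ignores the handling of a concurrent carry from the low half into the high
half that the real code has to perform): in the !HAVE_64B_ATOMICS path the
logical 64-bit counter value is kept as high * 2^31 + low, and the load
rebuilds it after masking off the top bit of the high half.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t count = 0x123456789aULL;                   /* any value < 2^62 */
  unsigned int h = (unsigned int) (count >> 31);      /* high 31 bits */
  unsigned int l = (unsigned int) (count & ((1U << 31) - 1));  /* low 31 bits */

  /* Same expression as __atomic_wide_counter_load_relaxed above.  */
  uint64_t r = ((uint64_t) (h & ~((unsigned int) 1 << 31)) << 31) + l;
  assert (r == count);
  return 0;
}
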
__atomic_wide_counter_add_relaxed (&cond->__data.__g1_start, val);
}
-#if USE_64B_ATOMICS == 1
+#if HAVE_64B_ATOMICS == 1
static inline uint64_t
__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val)
return atomic_fetch_xor_release (&cond->__data.__wseq.__value64, val);
}
-#else /* !USE_64B_ATOMICS */
+#else /* !HAVE_64B_ATOMICS */
/* The xor operation needs to be an atomic read-modify-write. The write
itself is not an issue as it affects just the lower-order half but not bits
return ((uint64_t) h << 31) + l2;
}
-#endif /* !USE_64B_ATOMICS */
+#endif /* !HAVE_64B_ATOMICS */
/* The lock that signalers use. See pthread_cond_wait_common for uses.
The lock is our normal three-state lock: not acquired (0) / acquired (1) /
but given that counter wrapround is probably impossible to hit
(2**32 operations in unsetenv concurrently with getenv), using
<atomic_wide_counter.h> seems unnecessary. */
-#if USE_64B_ATOMICS
+#if HAVE_64B_ATOMICS
typedef uint64_t environ_counter;
#else
typedef uint32_t environ_counter;
#ifndef _ALPHA_ATOMIC_MACHINE_H
#define _ALPHA_ATOMIC_MACHINE_H
-#include_next <atomic-machine.h>
+#include <stdint.h>
#define atomic_write_barrier() __asm ("wmb" : : : "memory");
and adaptive mutexes to optimize spin-wait loops.
*/
-#include <bits/wordsize.h>
-
-/* NB: The NPTL semaphore code casts a sem_t to a new_sem and issues a 64-bit
- atomic operation for USE_64B_ATOMICS. However, the sem_t has 32-bit
- alignment on 32-bit architectures, which prevents using 64-bit atomics even
- if the ABI supports it. */
-#if __WORDSIZE == 64
-# define USE_64B_ATOMICS 1
-#else
-# define USE_64B_ATOMICS 0
-#endif
-
#endif /* atomic-machine.h */
#include <atomic-machine.h>
#include <sem_t-align.h>
-#if USE_64B_ATOMICS && (SEM_T_ALIGN >= 8 \
- || defined HAVE_UNALIGNED_64B_ATOMICS)
+#if HAVE_64B_ATOMICS && (SEM_T_ALIGN >= 8 \
+ || defined HAVE_UNALIGNED_64B_ATOMICS)
# define USE_64B_ATOMICS_ON_SEM_T 1
#else
# define USE_64B_ATOMICS_ON_SEM_T 0
/* Static assert for types that can't be loaded/stored atomically on the
current architecture. */
-#if USE_64B_ATOMICS
+#if HAVE_64B_ATOMICS
#define __RSEQ_ASSERT_ATOMIC(member) \
_Static_assert (sizeof (RSEQ_SELF()->member) == 1 \
|| sizeof (RSEQ_SELF()->member) == 4 \
#ifdef __riscv_atomic
-#include_next <atomic-machine.h>
-
/* Miscellaneous. */
# define asm_amo(which, ordering, mem, value) ({ \
<https://www.gnu.org/licenses/>. */
#ifndef _SPARC_ATOMIC_MACHINE_H
-#define _SPARC_ATOMIC_MACHINE_H 1
-
-#include_next <atomic-machine.h>
+#define _SPARC_ATOMIC_MACHINE_H
#ifdef __sparc_v9__
# define atomic_full_barrier() \
# define atomic_spin_nop() __cpu_relax ()
#endif
-#endif /* _ATOMIC_MACHINE_H */
+#endif
gen-as-const-headers += ucontext_i.sym
endif
-# When I get this to work, this is the right thing
-ifeq ($(subdir),elf)
-CFLAGS-rtld.c += -mcpu=v8
-#rtld-routines += dl-sysdepsparc
-endif # elf
-
ifeq ($(subdir),math)
# These 2 routines are normally in libgcc{.a,_s.so.1}.
# However, sparc32 -mlong-double-128 libgcc relies on
#ifndef _X86_ATOMIC_MACHINE_H
#define _X86_ATOMIC_MACHINE_H 1
-#ifdef __x86_64__
-# define USE_64B_ATOMICS 1
-#else
-# define USE_64B_ATOMICS 0
-#endif
-
#define atomic_spin_nop() __asm ("pause")
#endif /* atomic-machine.h */