--- a/ChangeLog
+++ b/ChangeLog
2015-11-04  Joseph Myers  <joseph@codesourcery.com>
+ * sysdeps/arm/atomic-machine.h
+ [__GNUC_PREREQ (4, 7) && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4]:
+ Change conditional to [__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4].
+ [__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 && !__GNUC_PREREQ (4, 7)]:
+ Remove conditional code.
+ [!__GNUC_PREREQ (4, 7) || !__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4]:
+ Change conditional to [!__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4].
+ * sysdeps/i386/sysdep.h [__ASSEMBLER__ && __GNUC_PREREQ (4, 7)]:
+ Change conditional to [__ASSEMBLER__].
+ [__ASSEMBLER__ && !__GNUC_PREREQ (4, 7)]: Remove conditional code.
+ [!__ASSEMBLER__ && __GNUC_PREREQ (4, 7)]: Change conditional to
+ [!__ASSEMBLER__].
+ [!__ASSEMBLER__ && !__GNUC_PREREQ (4, 7)]: Remove conditional
+ code.
+ * sysdeps/unix/sysv/linux/sh/atomic-machine.h (rNOSP): Remove
+ conditional macro definitions.
+ (__arch_compare_and_exchange_val_8_acq): Use "u" instead of rNOSP.
+ (__arch_compare_and_exchange_val_16_acq): Likewise.
+ (__arch_compare_and_exchange_val_32_acq): Likewise.
+ (atomic_exchange_and_add): Likewise.
+ (atomic_add): Likewise.
+ (atomic_add_negative): Likewise.
+ (atomic_add_zero): Likewise.
+ (atomic_bit_set): Likewise.
+ (atomic_bit_test_set): Likewise.
+ * sysdeps/x86_64/atomic-machine.h [__GNUC_PREREQ (4, 7)]: Make
+ code unconditional.
+ [!__GNUC_PREREQ (4, 7)]: Remove conditional code.
+
* math/test-math-errno.h: New file.
* math/test-math-inline.h (TEST_INLINE): Define to 1 instead of
empty.
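Every entry above applies the same mechanical simplification: glibc's
minimum build compiler had just been raised to GCC 4.7, so
__GNUC_PREREQ (4, 7) is always true and can be folded out of the
preprocessor conditionals.  The shape of the change, as a sketch
rather than a quote of any one file:

    /* Before: a three-way split on compiler version and builtin support.  */
    #if __GNUC_PREREQ (4, 7) && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
      /* ... use the __atomic_* builtins ... */
    #elif defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
      /* ... use the older __sync_* builtins ... */
    #else
      /* ... fall back to assembly or kernel-assisted helpers ... */
    #endif

    /* After: the version test is gone; only builtin support matters.  */
    #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
      /* ... use the __atomic_* builtins ... */
    #else
      /* ... fall back to assembly or kernel-assisted helpers ... */
    #endif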
--- a/sysdeps/arm/atomic-machine.h
+++ b/sysdeps/arm/atomic-machine.h
/* Use the atomic builtins provided by GCC in case the backend provides
a pattern to do this efficiently. */
-#if __GNUC_PREREQ (4, 7) && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
# define atomic_exchange_acq(mem, value) \
__atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
({__arm_link_error (); oldval; })
-#elif defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
-/* Atomic compare and exchange. */
-# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- __sync_val_compare_and_swap ((mem), (oldval), (newval))
#else
# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
__arm_assisted_compare_and_exchange_val_32_acq ((mem), (newval), (oldval))
#endif
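On ARM, then, a single test on __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 now
chooses between the __atomic_* builtins and the kernel-assisted
helpers.  Roughly what the builtin path amounts to for a 4-byte
object, as a hand-written equivalent rather than the literal
__atomic_val_bysize expansion:

    /* Acquire exchange: returns the previous value of *mem.  */
    static inline int
    exchange_acq_sketch (int *mem, int newval)
    {
      return __atomic_exchange_n (mem, newval, __ATOMIC_ACQUIRE);
    }

    /* Acquire compare-and-swap: returns the value observed in *mem,
       which equals oldval exactly when the store happened.  */
    static inline int
    cas_val_acq_sketch (int *mem, int newval, int oldval)
    {
      __atomic_compare_exchange_n (mem, &oldval, newval, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
      return oldval;
    }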
-#if !__GNUC_PREREQ (4, 7) || !defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
/* We don't support atomic operations on any non-word types.
So make them link errors. */
# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
--- a/sysdeps/unix/sysv/linux/sh/atomic-machine.h
+++ b/sysdeps/unix/sysv/linux/sh/atomic-machine.h
r1: saved stack pointer
*/
-#if __GNUC_PREREQ (4, 7)
-# define rNOSP "u"
-#else
-# define rNOSP "r"
-#endif
-
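The rNOSP indirection deleted above existed only because the "u"
operand constraint requires GCC 4.7; with 4.7 now the minimum, the
asm statements can name it directly.  On SH, "u" appears to denote a
general register other than the stack pointer r15, which matters here
because these gUSA sequences temporarily repurpose r15 as the
atomic-region marker (hence the saved copy in r1).  A toy
illustration, with LOAD_VIA_NON_SP a made-up name:

    /* Hypothetical helper: load *P into r0 while guaranteeing the
       compiler never allocates P to r15, which the surrounding
       sequence reserves.  Assumes GCC's SH backend, where "u" is
       understood to exclude the stack pointer.  */
    #define LOAD_VIA_NON_SP(p) \
      __asm__ __volatile__ ("mov.l @%0,r0" : : "u" (p) : "r0", "memory")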
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
({ __typeof (*(mem)) __result; \
__asm __volatile ("\
bf 1f\n\
mov.b %2,@%1\n\
1: mov r1,r15"\
- : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \
+ : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
: "r0", "r1", "t", "memory"); \
__result; })
bf 1f\n\
mov.w %2,@%1\n\
1: mov r1,r15"\
- : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \
+ : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
: "r0", "r1", "t", "memory"); \
__result; })
bf 1f\n\
mov.l %2,@%1\n\
1: mov r1,r15"\
- : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \
+ : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
: "r0", "r1", "t", "memory"); \
__result; })
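These width-specific macros are not called directly; the generic
wrappers in glibc's internal include/atomic.h select among them by
sizeof (*mem).  A hedged usage sketch against that internal API:

    #include <atomic.h>  /* glibc-internal header, not an installed one.  */

    static int lock;

    /* atomic_compare_and_exchange_val_acq returns the value previously
       in *mem, so seeing 0 back means our 0 -> 1 transition won.  */
    static void
    lock_sketch (void)
    {
      while (atomic_compare_and_exchange_val_acq (&lock, 1, 0) != 0)
        continue;  /* spin */
    }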
add %0,r2\n\
mov.b r2,@%2\n\
1: mov r1,r15"\
- : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "memory"); \
else if (sizeof (*(mem)) == 2) \
__asm __volatile ("\
add %0,r2\n\
mov.w r2,@%2\n\
1: mov r1,r15"\
- : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "memory"); \
else if (sizeof (*(mem)) == 4) \
__asm __volatile ("\
add %0,r2\n\
mov.l r2,@%2\n\
1: mov r1,r15"\
- : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "memory"); \
else \
{ \
add %0,r2\n\
mov.b r2,@%1\n\
1: mov r1,r15"\
- : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \
+ : "=&r" (__tmp) : "u" (mem), "0" (__value) \
: "r0", "r1", "r2", "memory"); \
else if (sizeof (*(mem)) == 2) \
__asm __volatile ("\
add %0,r2\n\
mov.w r2,@%1\n\
1: mov r1,r15"\
- : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \
+ : "=&r" (__tmp) : "u" (mem), "0" (__value) \
: "r0", "r1", "r2", "memory"); \
else if (sizeof (*(mem)) == 4) \
__asm __volatile ("\
add %0,r2\n\
mov.l r2,@%1\n\
1: mov r1,r15"\
- : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \
+ : "=&r" (__tmp) : "u" (mem), "0" (__value) \
: "r0", "r1", "r2", "memory"); \
else \
{ \
1: mov r1,r15\n\
shal r2\n\
movt %0"\
- : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "t", "memory"); \
else if (sizeof (*(mem)) == 2) \
__asm __volatile ("\
1: mov r1,r15\n\
shal r2\n\
movt %0"\
- : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "t", "memory"); \
else if (sizeof (*(mem)) == 4) \
__asm __volatile ("\
1: mov r1,r15\n\
shal r2\n\
movt %0"\
- : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "t", "memory"); \
else \
abort (); \
1: mov r1,r15\n\
tst r2,r2\n\
movt %0"\
- : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "t", "memory"); \
else if (sizeof (*(mem)) == 2) \
__asm __volatile ("\
1: mov r1,r15\n\
tst r2,r2\n\
movt %0"\
- : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "t", "memory"); \
else if (sizeof (*(mem)) == 4) \
__asm __volatile ("\
1: mov r1,r15\n\
tst r2,r2\n\
movt %0"\
- : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \
+ : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
: "r0", "r1", "r2", "t", "memory"); \
else \
abort (); \
or %1,r2\n\
mov.b r2,@%0\n\
1: mov r1,r15"\
- : : rNOSP (mem), rNOSP (__mask) \
+ : : "u" (mem), "u" (__mask) \
: "r0", "r1", "r2", "memory"); \
else if (sizeof (*(mem)) == 2) \
__asm __volatile ("\
or %1,r2\n\
mov.w r2,@%0\n\
1: mov r1,r15"\
- : : rNOSP (mem), rNOSP (__mask) \
+ : : "u" (mem), "u" (__mask) \
: "r0", "r1", "r2", "memory"); \
else if (sizeof (*(mem)) == 4) \
__asm __volatile ("\
or %1,r2\n\
mov.l r2,@%0\n\
1: mov r1,r15"\
- : : rNOSP (mem), rNOSP (__mask) \
+ : : "u" (mem), "u" (__mask) \
: "r0", "r1", "r2", "memory"); \
else \
abort (); \
1: mov r1,r15\n\
and r3,%0"\
: "=&r" (__result), "=&r" (__mask) \
- : rNOSP (mem), "0" (__result), "1" (__mask) \
+ : "u" (mem), "0" (__result), "1" (__mask) \
: "r0", "r1", "r2", "r3", "memory"); \
else if (sizeof (*(mem)) == 2) \
__asm __volatile ("\
1: mov r1,r15\n\
and r3,%0"\
: "=&r" (__result), "=&r" (__mask) \
- : rNOSP (mem), "0" (__result), "1" (__mask) \
+ : "u" (mem), "0" (__result), "1" (__mask) \
: "r0", "r1", "r2", "r3", "memory"); \
else if (sizeof (*(mem)) == 4) \
__asm __volatile ("\
1: mov r1,r15\n\
and r3,%0"\
: "=&r" (__result), "=&r" (__mask) \
- : rNOSP (mem), "0" (__result), "1" (__mask) \
+ : "u" (mem), "0" (__result), "1" (__mask) \
: "r0", "r1", "r2", "r3", "memory"); \
else \
abort (); \