#ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
+# define atomic_full_barrier() __atomic_thread_fence (__ATOMIC_SEQ_CST)
#endif
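/* Illustrative sketch, not part of the patch: the old generic fallback was
   only a compiler barrier (empty asm with a "memory" clobber), while the new
   definition emits a real sequentially consistent fence on targets that need
   one (e.g. MFENCE on x86-64, DMB ISH on AArch64, SYNC on MIPS).  The
   variables and function below are hypothetical, for illustration only.  */
static int shared_data;
static int shared_flag;

static void
publish (void)
{
  shared_data = 42;
  /* What atomic_full_barrier () now expands to.  */
  __atomic_thread_fence (__ATOMIC_SEQ_CST);
  shared_flag = 1;
}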
#define __HAVE_64B_ATOMICS 1
#define ATOMIC_EXCHANGE_USES_CAS 0
-/* Barrier macro. */
-#define atomic_full_barrier() __sync_synchronize()
-
#endif
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-#define atomic_full_barrier() __asm ("mb" : : : "memory");
#define atomic_read_barrier() __asm ("mb" : : : "memory");
#define atomic_write_barrier() __asm ("wmb" : : : "memory");
is not as optimal as LLOCK/SCOND specially for SMP. */
#define ATOMIC_EXCHANGE_USES_CAS 1
-#define atomic_full_barrier() ({ asm volatile ("dmb 3":::"memory"); })
-
#endif /* _ARC_BITS_ATOMIC_H */
#define __HAVE_64B_ATOMICS 0
#define ATOMIC_EXCHANGE_USES_CAS 1
-
-#define atomic_full_barrier() __sync_synchronize ()
#include <atomic.h>
-#ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
-#endif
-
#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif
#ifndef _ATOMIC_MACHINE_H
#define _ATOMIC_MACHINE_H 1
-#define atomic_full_barrier() __sync_synchronize ()
-
#define __HAVE_64B_ATOMICS 0
/* XXX Is this actually correct? */
#ifndef _LINUX_LOONGARCH_BITS_ATOMIC_H
#define _LINUX_LOONGARCH_BITS_ATOMIC_H 1
-#define atomic_full_barrier() __sync_synchronize ()
-
#define __HAVE_64B_ATOMICS (__loongarch_grlen >= 64)
#define ATOMIC_EXCHANGE_USES_CAS 0
#define __HAVE_64B_ATOMICS 1
#endif
-/* See the comments in <sys/asm.h> about the use of the sync instruction. */
-#ifndef MIPS_SYNC
-# define MIPS_SYNC sync
-#endif
-
-#define MIPS_SYNC_STR_2(X) #X
-#define MIPS_SYNC_STR_1(X) MIPS_SYNC_STR_2(X)
-#define MIPS_SYNC_STR MIPS_SYNC_STR_1(MIPS_SYNC)
-
/* MIPS is an LL/SC machine. However, XLP has a direct atomic exchange
instruction which will be used by __atomic_exchange_n. */
#ifdef _MIPS_ARCH_XLP
# define ATOMIC_EXCHANGE_USES_CAS 0
#else
# define ATOMIC_EXCHANGE_USES_CAS 1
#endif
-#ifdef __mips16
-# define atomic_full_barrier() __sync_synchronize ()
-
-#else /* !__mips16 */
-# define atomic_full_barrier() \
- __asm__ __volatile__ (".set push\n\t" \
- MIPS_PUSH_MIPS2 \
- MIPS_SYNC_STR "\n\t" \
- ".set pop" : : : "memory")
-#endif /* !__mips16 */
-
#endif /* atomic-machine.h */
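/* Illustrative sketch, not part of the patch: ATOMIC_EXCHANGE_USES_CAS records
   whether atomic exchange is implemented as a compare-and-swap loop, so that
   generic code (e.g. spinlocks) can pick the cheaper primitive.  With the
   compiler builtin an exchange looks like this; the lock variable and function
   name are hypothetical.  */
static int lock_word;

static int
spin_try_lock (void)
{
  /* Returns the previous value: 0 means the lock was free and is now ours.  */
  return __atomic_exchange_n (&lock_word, 1, __ATOMIC_ACQUIRE);
}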
# define MTC0 dmtc0
#endif
-/* The MIPS architectures do not have a uniform memory model. Particular
- platforms may provide additional guarantees - for instance, the R4000
- LL and SC instructions implicitly perform a SYNC, and the 4K promises
- strong ordering.
-
- However, in the absence of those guarantees, we must assume weak ordering
- and SYNC explicitly where necessary.
-
- Some obsolete MIPS processors may not support the SYNC instruction. This
- applies to "true" MIPS I processors; most of the processors which compile
- using MIPS I implement parts of MIPS II. */
-
-#ifndef MIPS_SYNC
-# define MIPS_SYNC sync
-#endif
-
#endif /* sys/asm.h */
#define __HAVE_64B_ATOMICS 0
#define ATOMIC_EXCHANGE_USES_CAS 1
-#define atomic_full_barrier() ({ asm volatile ("l.msync" ::: "memory"); })
-
#endif /* atomic-machine.h */
# define atomic_write_barrier() __asm ("sync" ::: "memory")
#endif
-#define atomic_full_barrier() __asm ("sync" ::: "memory")
-
#endif
#ifndef _LINUX_RISCV_BITS_ATOMIC_H
#define _LINUX_RISCV_BITS_ATOMIC_H 1
-#define atomic_full_barrier() __sync_synchronize ()
-
#ifdef __riscv_atomic
# define __HAVE_64B_ATOMICS (__riscv_xlen >= 64)
#define ATOMIC_EXCHANGE_USES_CAS 0
-#define atomic_full_barrier() __sync_synchronize ()
#define atomic_read_barrier() __asm ("" ::: "memory")
#define atomic_write_barrier() __asm ("" ::: "memory")
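/* Illustrative sketch, not part of the patch: the two definitions above keep
   read and write barriers as pure compiler barriers, which is sufficient on a
   strongly ordered (TSO-like) target; only the full barrier has to emit a
   hardware fence.  Variables are hypothetical.  */
static int x, y;

static void
tso_ordered_stores (void)
{
  x = 1;
  __asm ("" ::: "memory");                   /* write barrier: no instruction emitted */
  y = 1;
  __atomic_thread_fence (__ATOMIC_SEQ_CST);  /* full barrier: real fence */
}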